-rw-r--r--  windhoek/MAINTAINER | 1
-rw-r--r--  windhoek/Makeconf.local | 10
-rw-r--r--  windhoek/Makefile | 36
-rw-r--r--  windhoek/TODO | 8
-rw-r--r--  windhoek/cdrom/cdrom.c | 3676
-rw-r--r--  windhoek/components.c | 407
-rw-r--r--  windhoek/default.ld | 213
-rw-r--r--  windhoek/genhd.c | 132
-rw-r--r--  windhoek/ide/ide-cd.h | 131
-rw-r--r--  windhoek/ide/ide-disk.c | 750
-rw-r--r--  windhoek/ide/ide-disk.h | 30
-rw-r--r--  windhoek/ide/ide-dma.c | 507
-rw-r--r--  windhoek/ide/ide-floppy.h | 39
-rw-r--r--  windhoek/ide/ide-gd.c | 409
-rw-r--r--  windhoek/ide/ide-gd.h | 44
-rw-r--r--  windhoek/ide/ide-generic.c | 206
-rw-r--r--  windhoek/ide/ide-io.c | 1228
-rw-r--r--  windhoek/ide/ide-iops.c | 1215
-rw-r--r--  windhoek/ide/ide-lib.c | 423
-rw-r--r--  windhoek/ide/ide-park.c | 124
-rw-r--r--  windhoek/ide/ide-pci-generic.c | 195
-rw-r--r--  windhoek/ide/ide-pio-blacklist.c | 94
-rw-r--r--  windhoek/ide/ide-pm.c | 239
-rw-r--r--  windhoek/ide/ide-pnp.c | 107
-rw-r--r--  windhoek/ide/ide-probe.c | 1739
-rw-r--r--  windhoek/ide/ide-proc.c | 701
-rw-r--r--  windhoek/ide/ide-sysfs.c | 125
-rw-r--r--  windhoek/ide/ide-taskfile.c | 695
-rw-r--r--  windhoek/ide/ide.c | 554
-rw-r--r--  windhoek/ide/local.h | 11
-rw-r--r--  windhoek/ide/piix.c | 482
-rw-r--r--  windhoek/ide/setup-pci.c | 694
-rw-r--r--  windhoek/include/Makefile | 9
-rw-r--r--  windhoek/include/types.h | 135
-rw-r--r--  windhoek/main.c | 88
-rw-r--r--  windhoek/partitions/Kconfig | 251
-rw-r--r--  windhoek/partitions/Makefile | 20
-rw-r--r--  windhoek/partitions/acorn.c | 555
-rw-r--r--  windhoek/partitions/acorn.h | 14
-rw-r--r--  windhoek/partitions/amiga.c | 130
-rw-r--r--  windhoek/partitions/amiga.h | 6
-rw-r--r--  windhoek/partitions/atari.c | 149
-rw-r--r--  windhoek/partitions/atari.h | 34
-rw-r--r--  windhoek/partitions/check.c | 639
-rw-r--r--  windhoek/partitions/check.h | 30
-rw-r--r--  windhoek/partitions/efi.c | 631
-rw-r--r--  windhoek/partitions/efi.h | 130
-rw-r--r--  windhoek/partitions/ibm.c | 220
-rw-r--r--  windhoek/partitions/ibm.h | 1
-rw-r--r--  windhoek/partitions/karma.c | 57
-rw-r--r--  windhoek/partitions/karma.h | 8
-rw-r--r--  windhoek/partitions/ldm.c | 1551
-rw-r--r--  windhoek/partitions/ldm.h | 215
-rw-r--r--  windhoek/partitions/mac.c | 132
-rw-r--r--  windhoek/partitions/mac.h | 44
-rw-r--r--  windhoek/partitions/msdos.c | 529
-rw-r--r--  windhoek/partitions/msdos.h | 8
-rw-r--r--  windhoek/partitions/osf.c | 78
-rw-r--r--  windhoek/partitions/osf.h | 7
-rw-r--r--  windhoek/partitions/sgi.c | 82
-rw-r--r--  windhoek/partitions/sgi.h | 8
-rw-r--r--  windhoek/partitions/sun.c | 122
-rw-r--r--  windhoek/partitions/sun.h | 8
-rw-r--r--  windhoek/partitions/sysv68.c | 92
-rw-r--r--  windhoek/partitions/sysv68.h | 1
-rw-r--r--  windhoek/partitions/ultrix.c | 48
-rw-r--r--  windhoek/partitions/ultrix.h | 5
-rw-r--r--  windhoek/windhoek_local.h | 112
68 files changed, 21374 insertions, 0 deletions
diff --git a/windhoek/MAINTAINER b/windhoek/MAINTAINER
new file mode 100644
index 00000000..b94bec5c
--- /dev/null
+++ b/windhoek/MAINTAINER
@@ -0,0 +1 @@
+mailaddr doebel@os.inf.tu-dresden.de
diff --git a/windhoek/Makeconf.local b/windhoek/Makeconf.local
new file mode 100644
index 00000000..d6594dc7
--- /dev/null
+++ b/windhoek/Makeconf.local
@@ -0,0 +1,10 @@
+SYSTEMS = x86-l4v2
+ARCH = x86
+SYSTEM = x86-l4v2
+
+DDEKITLIBDIR = /root/hurd/libddekit/
+DDEKITINCDIR = /root/hurd/libddekit/include
+DDE26LIBDIR = /root/hurd/libdde_linux26/lib/src
+OBJ_BASE = /root/hurd/libdde_linux26/build
+
+L4LIBDIR = .
diff --git a/windhoek/Makefile b/windhoek/Makefile
new file mode 100644
index 00000000..447562ca
--- /dev/null
+++ b/windhoek/Makefile
@@ -0,0 +1,36 @@
+PKGDIR ?= ../libdde_linux26
+L4DIR ?= $(PKGDIR)
+
+SYSTEMS = x86-l4v2
+
+DEFAULT_RELOC = 0x00a00000
+
+include Makeconf.local
+#-include $(PKGDIR_OBJ)/../dde/Makeconf
+
+TARGET = windhoek
+
+IDE_SRC = ide.c ide-disk.c ide-generic.c ide-io.c ide-park.c \
+ ide-iops.c ide-lib.c ide-pm.c ide-pnp.c ide-probe.c ide-taskfile.c \
+ setup-pci.c ide-pci-generic.c ide-pio-blacklist.c ide-sysfs.c \
+ piix.c ide-dma.c ide-gd.c
+IDEFILES = $(addprefix ide/, $(IDE_SRC))
+
+CDROM_SRC = #cdrom.c
+CDROMFILES = $(addprefix cdrom/, $(CDROM_SRC))
+
+PARTITION_SRC = msdos.c check.c
+PARTITIONFILES = $(addprefix partitions/, $(PARTITION_SRC))
+
+SRC_C = main.c genhd.c components.c
+SRC_C += $(IDEFILES) \
+ $(CDROMFILES) \
+ $(PARTITIONFILES)
+
+LIBS += --whole-archive -ldde_linux26_block --no-whole-archive -ldde_linux26.o \
+ -lddekit
+
+# DDE configuration
+include $(L4DIR)/Makeconf
+
+include $(L4DIR)/mk/prog.mk
diff --git a/windhoek/TODO b/windhoek/TODO
new file mode 100644
index 00000000..61506fe3
--- /dev/null
+++ b/windhoek/TODO
@@ -0,0 +1,8 @@
+* implement notifications (and tickets)
+* test and fix scatter-gather I/O
+* support multiple dataspaces per client
+ * clients can add dataspaces to an open connection
+ * SG I/O should be possible across all dataspaces
+ of a connection
+* replace IDE with libata
+* add USB Storage
diff --git a/windhoek/cdrom/cdrom.c b/windhoek/cdrom/cdrom.c
new file mode 100644
index 00000000..cceace61
--- /dev/null
+++ b/windhoek/cdrom/cdrom.c
@@ -0,0 +1,3676 @@
+/* linux/drivers/cdrom/cdrom.c
+ Copyright (c) 1996, 1997 David A. van Leeuwen.
+ Copyright (c) 1997, 1998 Erik Andersen <andersee@debian.org>
+ Copyright (c) 1998, 1999 Jens Axboe <axboe@image.dk>
+
+ May be copied or modified under the terms of the GNU General Public
+ License. See linux/COPYING for more information.
+
+ Uniform CD-ROM driver for Linux.
+ See Documentation/cdrom/cdrom-standard.tex for usage information.
+
+ The routines in the file provide a uniform interface between the
+ software that uses CD-ROMs and the various low-level drivers that
+ actually talk to the hardware. Suggestions are welcome.
+ Patches that work are more welcome though. ;-)
+
+ To Do List:
+ ----------------------------------
+
+ -- Modify sysctl/proc interface. I plan on having one directory per
+ drive, with entries for outputting general drive information, and sysctl
+ based tunable parameters such as whether the tray should auto-close for
+ that drive. Suggestions (or patches) for this welcome!
+
+
+ Revision History
+ ----------------------------------
+ 1.00 Date Unknown -- David van Leeuwen <david@tm.tno.nl>
+ -- Initial version by David A. van Leeuwen. I don't have a detailed
+ changelog for the 1.x series, David?
+
+2.00 Dec 2, 1997 -- Erik Andersen <andersee@debian.org>
+ -- New maintainer! As David A. van Leeuwen has been too busy to actively
+ maintain and improve this driver, I am now carrying on the torch. If
+ you have a problem with this driver, please feel free to contact me.
+
+ -- Added (rudimentary) sysctl interface. I realize this is really weak
+ right now, and is _very_ badly implemented. It will be improved...
+
+ -- Modified CDROM_DISC_STATUS so that it is now incorporated into
+ the Uniform CD-ROM driver via the cdrom_count_tracks function.
+ The cdrom_count_tracks function helps resolve some of the false
+ assumptions of the CDROM_DISC_STATUS ioctl, and is also used to check
+ for the correct media type when mounting or playing audio from a CD.
+
+ -- Remove the calls to verify_area and only use the copy_from_user and
+ copy_to_user stuff, since these calls now provide their own memory
+ checking with the 2.1.x kernels.
+
+ -- Major update to return codes so that errors from low-level drivers
+ are passed on through (thanks to Gerd Knorr for pointing out this
+ problem).
+
+ -- Made it so if a function isn't implemented in a low-level driver,
+ ENOSYS is now returned instead of EINVAL.
+
+ -- Simplified some complex logic so that the source code is easier to read.
+
+ -- Other stuff I probably forgot to mention (lots of changes).
+
+2.01 to 2.11 Dec 1997-Jan 1998
+ -- TO-DO! Write changelogs for 2.01 to 2.12.
+
+2.12 Jan 24, 1998 -- Erik Andersen <andersee@debian.org>
+ -- Fixed a bug in the IOCTL_IN and IOCTL_OUT macros. It turns out that
+ copy_*_user does not return EFAULT on error, but instead returns the number
+ of bytes not copied. I was returning whatever non-zero stuff came back from
+ the copy_*_user functions directly, which would result in strange errors.
+
+2.13 July 17, 1998 -- Erik Andersen <andersee@debian.org>
+ -- Fixed a bug in CDROM_SELECT_SPEED where you couldn't lower the speed
+ of the drive. Thanks to Tobias Ringstr|m <tori@prosolvia.se> for pointing
+ this out and providing a simple fix.
+ -- Fixed the procfs-unload-module bug with the fill_inode procfs callback.
+ thanks to Andrea Arcangeli
+ -- Fixed it so that the /proc entry now also shows up when cdrom is
+ compiled into the kernel. Before it only worked when loaded as a module.
+
+ 2.14 August 17, 1998 -- Erik Andersen <andersee@debian.org>
+ -- Fixed a bug in cdrom_media_changed and handling of reporting that
+ the media had changed for devices that _don't_ implement media_changed.
+ Thanks to Grant R. Guenther <grant@torque.net> for spotting this bug.
+ -- Made a few things more pedantically correct.
+
+2.50 Oct 19, 1998 - Jens Axboe <axboe@image.dk>
+ -- New maintainers! Erik was too busy to continue the work on the driver,
+ so now Chris Zwilling <chris@cloudnet.com> and Jens Axboe <axboe@image.dk>
+ will do their best to follow in his footsteps
+
+ 2.51 Dec 20, 1998 - Jens Axboe <axboe@image.dk>
+ -- Check if drive is capable of doing what we ask before blindly changing
+ cdi->options in various ioctl.
+ -- Added version to proc entry.
+
+ 2.52 Jan 16, 1999 - Jens Axboe <axboe@image.dk>
+ -- Fixed an error in open_for_data where we would sometimes not return
+ the correct error value. Thanks Huba Gaspar <huba@softcell.hu>.
+ -- Fixed module usage count - usage was based on /proc/sys/dev
+ instead of /proc/sys/dev/cdrom. This could lead to an oops when other
+ modules had entries in dev. Feb 02 - real bug was in sysctl.c where
+ dev would be removed even though it was used. cdrom.c just illuminated
+ that bug.
+
+ 2.53 Feb 22, 1999 - Jens Axboe <axboe@image.dk>
+ -- Fixup of several ioctl calls, in particular CDROM_SET_OPTIONS has
+ been "rewritten" because capabilities and options aren't in sync. They
+ should be...
+ -- Added CDROM_LOCKDOOR ioctl. Locks the door and keeps it that way.
+ -- Added CDROM_RESET ioctl.
+ -- Added CDROM_DEBUG ioctl. Enable debug messages on-the-fly.
+ -- Added CDROM_GET_CAPABILITY ioctl. This relieves userspace programs
+ from parsing /proc/sys/dev/cdrom/info.
+
+ 2.54 Mar 15, 1999 - Jens Axboe <axboe@image.dk>
+ -- Check capability mask from low level driver when counting tracks as
+ per suggestion from Corey J. Scotts <cstotts@blue.weeg.uiowa.edu>.
+
+ 2.55 Apr 25, 1999 - Jens Axboe <axboe@image.dk>
+ -- autoclose was mistakenly checked against CDC_OPEN_TRAY instead of
+ CDC_CLOSE_TRAY.
+ -- proc info didn't mask against capabilities mask.
+
+ 3.00 Aug 5, 1999 - Jens Axboe <axboe@image.dk>
+ -- Unified audio ioctl handling across CD-ROM drivers. A lot of the
+ code was duplicated before. Drives that support the generic packet
+ interface are now being fed packets from here instead.
+ -- First attempt at adding support for MMC2 commands - for DVD and
+ CD-R(W) drives. Only the DVD parts are in now - the interface used is
+ the same as for the audio ioctls.
+ -- ioctl cleanups. If a drive couldn't play audio, it didn't get
+ a chance to perform device specific ioctls as well.
+ -- Defined CDROM_CAN(CDC_XXX) for checking the capabilities.
+ -- Put in sysctl files for autoclose, autoeject, check_media, debug,
+ and lock.
+ -- /proc/sys/dev/cdrom/info has been updated to also contain info about
+ CD-Rx and DVD capabilities.
+ -- Now default to checking media type.
+ -- CDROM_SEND_PACKET ioctl added. The infrastructure was in place for
+ doing this anyway, with the generic_packet addition.
+
+ 3.01 Aug 6, 1999 - Jens Axboe <axboe@image.dk>
+ -- Fix up the sysctl handling so that the option flags get set
+ correctly.
+ -- Fix up ioctl handling so the device specific ones actually get
+ called :).
+
+ 3.02 Aug 8, 1999 - Jens Axboe <axboe@image.dk>
+ -- Fixed volume control on SCSI drives (or others with longer audio
+ page).
+ -- Fixed a couple of DVD minors. Thanks to Andrew T. Veliath
+ <andrewtv@usa.net> for telling me and for having defined the various
+ DVD structures and ioctls in the first place! He designed the original
+ DVD patches for ide-cd and while I rearranged and unified them, the
+ interface is still the same.
+
+ 3.03 Sep 1, 1999 - Jens Axboe <axboe@image.dk>
+ -- Moved the rest of the audio ioctls from the CD-ROM drivers here. Only
+ CDROMREADTOCENTRY and CDROMREADTOCHDR are left.
+ -- Moved the CDROMREADxxx ioctls in here.
+ -- Defined the cdrom_get_last_written and cdrom_get_next_block as ioctls
+ and exported functions.
+ -- Erik Andersen <andersen@xmission.com> modified all SCMD_ commands
+ to now read GPCMD_ for the new generic packet interface. All low level
+ drivers are updated as well.
+ -- Various other cleanups.
+
+ 3.04 Sep 12, 1999 - Jens Axboe <axboe@image.dk>
+ -- Fixed a couple of possible memory leaks (if an operation failed and
+ we didn't free the buffer before returning the error).
+ -- Integrated Uniform CD Changer handling from Richard Sharman
+ <rsharman@pobox.com>.
+ -- Defined CD_DVD and CD_CHANGER log levels.
+ -- Fixed the CDROMREADxxx ioctls.
+ -- CDROMPLAYTRKIND uses the GPCMD_PLAY_AUDIO_MSF command - too few
+ drives supported it. We lose the index part, however.
+ -- Small modifications to accommodate opens of /dev/hdc1, required
+ for ide-cd to handle multisession discs.
+ -- Export cdrom_mode_sense and cdrom_mode_select.
+ -- init_cdrom_command() for setting up a cgc command.
+
+ 3.05 Oct 24, 1999 - Jens Axboe <axboe@image.dk>
+ -- Changed the interface for CDROM_SEND_PACKET. Before it was virtually
+ impossible to send the drive data in a sensible way.
+ -- Lowered stack usage in mmc_ioctl(), dvd_read_disckey(), and
+ dvd_read_manufact.
+ -- Added setup of write mode for packet writing.
+ -- Fixed CDDA ripping with cdda2wav - accept much larger requests of
+ number of frames and split the reads in blocks of 8.
+
+ 3.06 Dec 13, 1999 - Jens Axboe <axboe@image.dk>
+ -- Added support for changing the region of DVD drives.
+ -- Added sense data to generic command.
+
+ 3.07 Feb 2, 2000 - Jens Axboe <axboe@suse.de>
+ -- Do same "read header length" trick in cdrom_get_disc_info() as
+ we do in cdrom_get_track_info() -- some drives don't obey specs and
+ fail if they can't supply the full Mt Fuji size table.
+ -- Deleted stuff related to setting up write modes. It has a different
+ home now.
+ -- Clear header length in mode_select unconditionally.
+ -- Removed the register_disk() that was added, not needed here.
+
+ 3.08 May 1, 2000 - Jens Axboe <axboe@suse.de>
+ -- Fix direction flag in setup_send_key and setup_report_key. This
+ gave some SCSI adapters problems.
+ -- Always return -EROFS for write opens
+ -- Convert to module_init/module_exit style init and remove some
+ of the #ifdef MODULE stuff
+ -- Fix several dvd errors - DVD_LU_SEND_ASF should pass agid,
+ DVD_HOST_SEND_RPC_STATE did not set buffer size in cdb, and
+ dvd_do_auth passed uninitialized data to drive because init_cdrom_command
+ did not clear a 0 sized buffer.
+
+ 3.09 May 12, 2000 - Jens Axboe <axboe@suse.de>
+ -- Fix Video-CD on SCSI drives that don't support READ_CD command. In
+ that case switch block size and issue plain READ_10 again, then switch
+ back.
+
+ 3.10 Jun 10, 2000 - Jens Axboe <axboe@suse.de>
+ -- Fix volume control on CDs - old SCSI-II drives now use their own
+ code, as doing MODE6 stuff in here is really not my intention.
+ -- Use READ_DISC_INFO for more reliable end-of-disc.
+
+ 3.11 Jun 12, 2000 - Jens Axboe <axboe@suse.de>
+ -- Fix bug in getting rpc phase 2 region info.
+ -- Reinstate "correct" CDROMPLAYTRKIND
+
+ 3.12 Oct 18, 2000 - Jens Axboe <axboe@suse.de>
+ -- Use quiet bit on packet commands not known to work
+
+ 3.20 Dec 17, 2003 - Jens Axboe <axboe@suse.de>
+ -- Various fixes and lots of cleanups not listed :-)
+ -- Locking fixes
+ -- Mt Rainier support
+ -- DVD-RAM write open fixes
+
+ Nov 5 2001, Aug 8 2002. Modified by Andy Polyakov
+ <appro@fy.chalmers.se> to support MMC-3 compliant DVD+RW units.
+
+ Modified by Nigel Kukard <nkukard@lbsd.net> - support DVD+RW
+ 2.4.x patch by Andy Polyakov <appro@fy.chalmers.se>
+
+-------------------------------------------------------------------------*/
+
+#define REVISION "Revision: 3.20"
+#define VERSION "Id: cdrom.c 3.20 2003/12/17"
+
+/* I use an error-log mask to give fine-grained control over the type of
+ messages dumped to the system logs. The available masks include: */
+#define CD_NOTHING 0x0
+#define CD_WARNING 0x1
+#define CD_REG_UNREG 0x2
+#define CD_DO_IOCTL 0x4
+#define CD_OPEN 0x8
+#define CD_CLOSE 0x10
+#define CD_COUNT_TRACKS 0x20
+#define CD_CHANGER 0x40
+#define CD_DVD 0x80
+
+/* Define this to remove _all_ the debugging messages */
+/* #define ERRLOGMASK CD_NOTHING */
+#define ERRLOGMASK CD_WARNING
+/* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
+/* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/major.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cdrom.h>
+#include <linux/sysctl.h>
+#include <linux/proc_fs.h>
+#include <linux/blkpg.h>
+#include <linux/init.h>
+#include <linux/fcntl.h>
+#include <linux/blkdev.h>
+#include <linux/times.h>
+
+#include <asm/uaccess.h>
+
+/* used to tell the module to turn on full debugging messages */
+static int debug;
+/* used to keep tray locked at all times */
+static int keeplocked;
+/* default compatibility mode */
+static int autoclose=1;
+static int autoeject;
+static int lockdoor = 1;
+/* will we ever get to use this... sigh. */
+static int check_media_type;
+/* automatically restart mrw format */
+static int mrw_format_restart = 1;
+module_param(debug, bool, 0);
+module_param(autoclose, bool, 0);
+module_param(autoeject, bool, 0);
+module_param(lockdoor, bool, 0);
+module_param(check_media_type, bool, 0);
+module_param(mrw_format_restart, bool, 0);
+
+static DEFINE_MUTEX(cdrom_mutex);
+
+static const char *mrw_format_status[] = {
+ "not mrw",
+ "bgformat inactive",
+ "bgformat active",
+ "mrw complete",
+};
+
+static const char *mrw_address_space[] = { "DMA", "GAA" };
+
+#if (ERRLOGMASK!=CD_NOTHING)
+#define cdinfo(type, fmt, args...) \
+ if ((ERRLOGMASK & type) || debug==1 ) \
+ printk(KERN_INFO "cdrom: " fmt, ## args)
+#else
+#define cdinfo(type, fmt, args...)
+#endif
+
+/* These are used to simplify getting data in from and back to user land */
+#define IOCTL_IN(arg, type, in) \
+ if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \
+ return -EFAULT;
+
+#define IOCTL_OUT(arg, type, out) \
+ if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \
+ return -EFAULT;
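+
+/*
+ * Illustrative sketch only (not part of the original driver): how the
+ * IOCTL_IN/IOCTL_OUT helpers above are typically used from an ioctl
+ * handler. The handler name is hypothetical; struct cdrom_volctrl is
+ * the ordinary <linux/cdrom.h> volume-control payload.
+ */
+#if 0
+static int example_volctrl_ioctl(struct cdrom_device_info *cdi,
+                                 unsigned long arg)
+{
+        struct cdrom_volctrl volctrl;
+
+        IOCTL_IN(arg, struct cdrom_volctrl, volctrl);   /* copy_from_user */
+        /* ... inspect or adjust volctrl here ... */
+        IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);  /* copy_to_user */
+        return 0;
+}
+#endif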
+
+/* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in
+ a lot of places. This macro makes the code more clear. */
+#define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type))
+
+/* used in the audio ioctls */
+#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret
+
+/*
+ * Another popular OS uses 7 seconds as the hard timeout for default
+ * commands, so it is a good choice for us as well.
+ */
+#define CDROM_DEF_TIMEOUT (7 * HZ)
+
+/* Not-exported routines. */
+static int open_for_data(struct cdrom_device_info * cdi);
+static int check_for_audio_disc(struct cdrom_device_info * cdi,
+ struct cdrom_device_ops * cdo);
+static void sanitize_format(union cdrom_addr *addr,
+ u_char * curr, u_char requested);
+static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+ unsigned long arg);
+
+int cdrom_get_last_written(struct cdrom_device_info *, long *);
+static int cdrom_get_next_writable(struct cdrom_device_info *, long *);
+static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*);
+
+static int cdrom_mrw_exit(struct cdrom_device_info *cdi);
+
+static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di);
+
+static void cdrom_sysctl_register(void);
+
+static LIST_HEAD(cdrom_list);
+
+static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
+ struct packet_command *cgc)
+{
+ if (cgc->sense) {
+ cgc->sense->sense_key = 0x05;
+ cgc->sense->asc = 0x20;
+ cgc->sense->ascq = 0x00;
+ }
+
+ cgc->stat = -EIO;
+ return -EIO;
+}
+
+/* This macro makes sure we don't have to check on cdrom_device_ops
+ * existence in the run-time routines below. Change_capability is a
+ * hack to have the capability flags defined const, while we can still
+ * change it here without gcc complaining at every line.
+ */
+#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits)
+
+int register_cdrom(struct cdrom_device_info *cdi)
+{
+ static char banner_printed;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ int *change_capability = (int *)&cdo->capability; /* hack */
+
+ cdinfo(CD_OPEN, "entering register_cdrom\n");
+
+ if (cdo->open == NULL || cdo->release == NULL)
+ return -EINVAL;
+ if (!banner_printed) {
+ printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n");
+ banner_printed = 1;
+ cdrom_sysctl_register();
+ }
+
+ ENSURE(drive_status, CDC_DRIVE_STATUS );
+ ENSURE(media_changed, CDC_MEDIA_CHANGED);
+ ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
+ ENSURE(lock_door, CDC_LOCK);
+ ENSURE(select_speed, CDC_SELECT_SPEED);
+ ENSURE(get_last_session, CDC_MULTI_SESSION);
+ ENSURE(get_mcn, CDC_MCN);
+ ENSURE(reset, CDC_RESET);
+ ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ cdi->mc_flags = 0;
+ cdo->n_minors = 0;
+ cdi->options = CDO_USE_FFLAGS;
+
+ if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
+ cdi->options |= (int) CDO_AUTO_CLOSE;
+ if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY))
+ cdi->options |= (int) CDO_AUTO_EJECT;
+ if (lockdoor==1)
+ cdi->options |= (int) CDO_LOCK;
+ if (check_media_type==1)
+ cdi->options |= (int) CDO_CHECK_TYPE;
+
+ if (CDROM_CAN(CDC_MRW_W))
+ cdi->exit = cdrom_mrw_exit;
+
+ if (cdi->disk)
+ cdi->cdda_method = CDDA_BPC_FULL;
+ else
+ cdi->cdda_method = CDDA_OLD;
+
+ if (!cdo->generic_packet)
+ cdo->generic_packet = cdrom_dummy_generic_packet;
+
+ cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
+ mutex_lock(&cdrom_mutex);
+ list_add(&cdi->list, &cdrom_list);
+ mutex_unlock(&cdrom_mutex);
+ return 0;
+}
+#undef ENSURE
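+
+#if 0
+/*
+ * Illustrative sketch only (not part of this commit): the minimum a
+ * low-level driver supplies before calling register_cdrom(). The
+ * mycd_* callbacks are hypothetical stubs defined elsewhere; the
+ * fields shown are the ones register_cdrom() above inspects
+ * (open/release are mandatory, capability bits not backed by a
+ * callback are trimmed by the ENSURE() checks).
+ */
+static struct cdrom_device_ops mycd_dops = {
+        .open           = mycd_open,
+        .release        = mycd_release,
+        .drive_status   = mycd_drive_status,
+        .generic_packet = mycd_packet,
+        .capability     = CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK |
+                          CDC_DRIVE_STATUS | CDC_GENERIC_PACKET,
+};
+
+static struct cdrom_device_info mycd_info = {
+        .ops    = &mycd_dops,
+        .name   = "mycd",
+};
+
+/* ...and in the driver's probe path: register_cdrom(&mycd_info); */
+#endif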
+
+void unregister_cdrom(struct cdrom_device_info *cdi)
+{
+ cdinfo(CD_OPEN, "entering unregister_cdrom\n");
+
+ mutex_lock(&cdrom_mutex);
+ list_del(&cdi->list);
+ mutex_unlock(&cdrom_mutex);
+
+ if (cdi->exit)
+ cdi->exit(cdi);
+
+ cdi->ops->n_minors--;
+ cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
+}
+
+int cdrom_get_media_event(struct cdrom_device_info *cdi,
+ struct media_event_desc *med)
+{
+ struct packet_command cgc;
+ unsigned char buffer[8];
+ struct event_header *eh = (struct event_header *) buffer;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION;
+ cgc.cmd[1] = 1; /* IMMED */
+ cgc.cmd[4] = 1 << 4; /* media event */
+ cgc.cmd[8] = sizeof(buffer);
+ cgc.quiet = 1;
+
+ if (cdi->ops->generic_packet(cdi, &cgc))
+ return 1;
+
+ if (be16_to_cpu(eh->data_len) < sizeof(*med))
+ return 1;
+
+ if (eh->nea || eh->notification_class != 0x4)
+ return 1;
+
+ memcpy(med, &buffer[sizeof(*eh)], sizeof(*med));
+ return 0;
+}
+
+/*
+ * the first prototypes used 0x2c as the page code for the mrw mode page,
+ * subsequently this was changed to 0x03. probe the one used by this drive
+ */
+static int cdrom_mrw_probe_pc(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+ char buffer[16];
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.timeout = HZ;
+ cgc.quiet = 1;
+
+ if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC, 0)) {
+ cdi->mrw_mode_page = MRW_MODE_PC;
+ return 0;
+ } else if (!cdrom_mode_sense(cdi, &cgc, MRW_MODE_PC_PRE1, 0)) {
+ cdi->mrw_mode_page = MRW_MODE_PC_PRE1;
+ return 0;
+ }
+
+ return 1;
+}
+
+static int cdrom_is_mrw(struct cdrom_device_info *cdi, int *write)
+{
+ struct packet_command cgc;
+ struct mrw_feature_desc *mfd;
+ unsigned char buffer[16];
+ int ret;
+
+ *write = 0;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[3] = CDF_MRW;
+ cgc.cmd[8] = sizeof(buffer);
+ cgc.quiet = 1;
+
+ if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ return ret;
+
+ mfd = (struct mrw_feature_desc *)&buffer[sizeof(struct feature_header)];
+ if (be16_to_cpu(mfd->feature_code) != CDF_MRW)
+ return 1;
+ *write = mfd->write;
+
+ if ((ret = cdrom_mrw_probe_pc(cdi))) {
+ *write = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdrom_mrw_bgformat(struct cdrom_device_info *cdi, int cont)
+{
+ struct packet_command cgc;
+ unsigned char buffer[12];
+ int ret;
+
+ printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : "");
+
+ /*
+ * FmtData bit set (bit 4), format type is 1
+ */
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_WRITE);
+ cgc.cmd[0] = GPCMD_FORMAT_UNIT;
+ cgc.cmd[1] = (1 << 4) | 1;
+
+ cgc.timeout = 5 * 60 * HZ;
+
+ /*
+ * 4 byte format list header, 8 byte format list descriptor
+ */
+ buffer[1] = 1 << 1;
+ buffer[3] = 8;
+
+ /*
+ * nr_blocks field
+ */
+ buffer[4] = 0xff;
+ buffer[5] = 0xff;
+ buffer[6] = 0xff;
+ buffer[7] = 0xff;
+
+ buffer[8] = 0x24 << 2;
+ buffer[11] = cont;
+
+ ret = cdi->ops->generic_packet(cdi, &cgc);
+ if (ret)
+ printk(KERN_INFO "cdrom: bgformat failed\n");
+
+ return ret;
+}
+
+static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_CLOSE_TRACK;
+
+ /*
+ * Session = 1, Track = 0
+ */
+ cgc.cmd[1] = !!immed;
+ cgc.cmd[2] = 1 << 1;
+
+ cgc.timeout = 5 * 60 * HZ;
+
+ return cdi->ops->generic_packet(cdi, &cgc);
+}
+
+static int cdrom_flush_cache(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+
+ cgc.timeout = 5 * 60 * HZ;
+
+ return cdi->ops->generic_packet(cdi, &cgc);
+}
+
+static int cdrom_mrw_exit(struct cdrom_device_info *cdi)
+{
+ disc_information di;
+ int ret;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < 0 || ret < (int)offsetof(typeof(di),disc_type))
+ return 1;
+
+ ret = 0;
+ if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) {
+ printk(KERN_INFO "cdrom: issuing MRW back ground "
+ "format suspend\n");
+ ret = cdrom_mrw_bgformat_susp(cdi, 0);
+ }
+
+ if (!ret && cdi->media_written)
+ ret = cdrom_flush_cache(cdi);
+
+ return ret;
+}
+
+static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space)
+{
+ struct packet_command cgc;
+ struct mode_page_header *mph;
+ char buffer[16];
+ int ret, offset, size;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.buffer = buffer;
+ cgc.buflen = sizeof(buffer);
+
+ if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0)))
+ return ret;
+
+ mph = (struct mode_page_header *) buffer;
+ offset = be16_to_cpu(mph->desc_length);
+ size = be16_to_cpu(mph->mode_data_length) + 2;
+
+ buffer[offset + 3] = space;
+ cgc.buflen = size;
+
+ if ((ret = cdrom_mode_select(cdi, &cgc)))
+ return ret;
+
+ printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]);
+ return 0;
+}
+
+static int cdrom_get_random_writable(struct cdrom_device_info *cdi,
+ struct rwrt_feature_desc *rfd)
+{
+ struct packet_command cgc;
+ char buffer[24];
+ int ret;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION; /* often 0x46 */
+ cgc.cmd[3] = CDF_RWRT; /* often 0x0020 */
+ cgc.cmd[8] = sizeof(buffer); /* often 0x18 */
+ cgc.quiet = 1;
+
+ if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ return ret;
+
+ memcpy(rfd, &buffer[sizeof(struct feature_header)], sizeof (*rfd));
+ return 0;
+}
+
+static int cdrom_has_defect_mgt(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+ char buffer[16];
+ __be16 *feature_code;
+ int ret;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[3] = CDF_HWDM;
+ cgc.cmd[8] = sizeof(buffer);
+ cgc.quiet = 1;
+
+ if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ return ret;
+
+ feature_code = (__be16 *) &buffer[sizeof(struct feature_header)];
+ if (be16_to_cpu(*feature_code) == CDF_HWDM)
+ return 0;
+
+ return 1;
+}
+
+
+static int cdrom_is_random_writable(struct cdrom_device_info *cdi, int *write)
+{
+ struct rwrt_feature_desc rfd;
+ int ret;
+
+ *write = 0;
+
+ if ((ret = cdrom_get_random_writable(cdi, &rfd)))
+ return ret;
+
+ if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
+ *write = 1;
+
+ return 0;
+}
+
+static int cdrom_media_erasable(struct cdrom_device_info *cdi)
+{
+ disc_information di;
+ int ret;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < 0 || ret < offsetof(typeof(di), n_first_track))
+ return -1;
+
+ return di.erasable;
+}
+
+/*
+ * FIXME: check RO bit
+ */
+static int cdrom_dvdram_open_write(struct cdrom_device_info *cdi)
+{
+ int ret = cdrom_media_erasable(cdi);
+
+ /*
+ * allow writable open if media info read worked and media is
+ * erasable, _or_ if it fails since not all drives support it
+ */
+ if (!ret)
+ return 1;
+
+ return 0;
+}
+
+static int cdrom_mrw_open_write(struct cdrom_device_info *cdi)
+{
+ disc_information di;
+ int ret;
+
+ /*
+ * always reset to DMA lba space on open
+ */
+ if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) {
+ printk(KERN_ERR "cdrom: failed setting lba address space\n");
+ return 1;
+ }
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < 0 || ret < offsetof(typeof(di),disc_type))
+ return 1;
+
+ if (!di.erasable)
+ return 1;
+
+ /*
+ * mrw_status
+ * 0 - not MRW formatted
+ * 1 - MRW bgformat started, but not running or complete
+ * 2 - MRW bgformat in progress
+ * 3 - MRW formatting complete
+ */
+ ret = 0;
+ printk(KERN_INFO "cdrom open: mrw_status '%s'\n",
+ mrw_format_status[di.mrw_status]);
+ if (!di.mrw_status)
+ ret = 1;
+ else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE &&
+ mrw_format_restart)
+ ret = cdrom_mrw_bgformat(cdi, 1);
+
+ return ret;
+}
+
+static int mo_open_write(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+ char buffer[255];
+ int ret;
+
+ init_cdrom_command(&cgc, &buffer, 4, CGC_DATA_READ);
+ cgc.quiet = 1;
+
+ /*
+ * obtain write protect information as per
+ * drivers/scsi/sd.c:sd_read_write_protect_flag
+ */
+
+ ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
+ if (ret)
+ ret = cdrom_mode_sense(cdi, &cgc, GPMODE_VENDOR_PAGE, 0);
+ if (ret) {
+ cgc.buflen = 255;
+ ret = cdrom_mode_sense(cdi, &cgc, GPMODE_ALL_PAGES, 0);
+ }
+
+ /* drive gave us no info, let the user go ahead */
+ if (ret)
+ return 0;
+
+ return buffer[3] & 0x80;
+}
+
+static int cdrom_ram_open_write(struct cdrom_device_info *cdi)
+{
+ struct rwrt_feature_desc rfd;
+ int ret;
+
+ if ((ret = cdrom_has_defect_mgt(cdi)))
+ return ret;
+
+ if ((ret = cdrom_get_random_writable(cdi, &rfd)))
+ return ret;
+ else if (CDF_RWRT == be16_to_cpu(rfd.feature_code))
+ ret = !rfd.curr;
+
+ cdinfo(CD_OPEN, "can open for random write\n");
+ return ret;
+}
+
+static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+ char buffer[32];
+ int ret, mmc3_profile;
+
+ init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
+
+ cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+ cgc.cmd[1] = 0;
+ cgc.cmd[2] = cgc.cmd[3] = 0; /* Starting Feature Number */
+ cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
+ cgc.quiet = 1;
+
+ if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ mmc3_profile = 0xffff;
+ else
+ mmc3_profile = (buffer[6] << 8) | buffer[7];
+
+ cdi->mmc3_profile = mmc3_profile;
+}
+
+static int cdrom_is_dvd_rw(struct cdrom_device_info *cdi)
+{
+ switch (cdi->mmc3_profile) {
+ case 0x12: /* DVD-RAM */
+ case 0x1A: /* DVD+RW */
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+/*
+ * returns 0 for ok to open write, non-0 to disallow
+ */
+static int cdrom_open_write(struct cdrom_device_info *cdi)
+{
+ int mrw, mrw_write, ram_write;
+ int ret = 1;
+
+ mrw = 0;
+ if (!cdrom_is_mrw(cdi, &mrw_write))
+ mrw = 1;
+
+ if (CDROM_CAN(CDC_MO_DRIVE))
+ ram_write = 1;
+ else
+ (void) cdrom_is_random_writable(cdi, &ram_write);
+
+ if (mrw)
+ cdi->mask &= ~CDC_MRW;
+ else
+ cdi->mask |= CDC_MRW;
+
+ if (mrw_write)
+ cdi->mask &= ~CDC_MRW_W;
+ else
+ cdi->mask |= CDC_MRW_W;
+
+ if (ram_write)
+ cdi->mask &= ~CDC_RAM;
+ else
+ cdi->mask |= CDC_RAM;
+
+ if (CDROM_CAN(CDC_MRW_W))
+ ret = cdrom_mrw_open_write(cdi);
+ else if (CDROM_CAN(CDC_DVD_RAM))
+ ret = cdrom_dvdram_open_write(cdi);
+ else if (CDROM_CAN(CDC_RAM) &&
+ !CDROM_CAN(CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_MRW|CDC_MO_DRIVE))
+ ret = cdrom_ram_open_write(cdi);
+ else if (CDROM_CAN(CDC_MO_DRIVE))
+ ret = mo_open_write(cdi);
+ else if (!cdrom_is_dvd_rw(cdi))
+ ret = 0;
+
+ return ret;
+}
+
+static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi)
+{
+ struct packet_command cgc;
+
+ if (cdi->mmc3_profile != 0x1a) {
+ cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name);
+ return;
+ }
+
+ if (!cdi->media_written) {
+ cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name);
+ return;
+ }
+
+ printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n",
+ cdi->name);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.timeout = 30*HZ;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_CLOSE_TRACK;
+ cgc.timeout = 3000*HZ;
+ cgc.quiet = 1;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_CLOSE_TRACK;
+ cgc.cmd[2] = 2; /* Close session */
+ cgc.quiet = 1;
+ cgc.timeout = 3000*HZ;
+ cdi->ops->generic_packet(cdi, &cgc);
+
+ cdi->media_written = 0;
+}
+
+static int cdrom_close_write(struct cdrom_device_info *cdi)
+{
+#if 0
+ return cdrom_flush_cache(cdi);
+#else
+ return 0;
+#endif
+}
+
+/* We use the open-option O_NONBLOCK to indicate that the
+ * purpose of opening is only for subsequent ioctl() calls; no device
+ * integrity checks are performed.
+ *
+ * We hope that all cd-player programs will adopt this convention. It
+ * is in their own interest: device control becomes a lot easier
+ * this way.
+ */
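+/*
+ * For illustration (not part of the original driver): a control-only
+ * client would open the device node with O_NONBLOCK and go straight
+ * to ioctl(), e.g.
+ *
+ *      fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);
+ *      ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT);
+ *
+ * whereas a mount-style open omits O_NONBLOCK and goes through the
+ * full drive/disc integrity checks in open_for_data() below.
+ */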
+int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode)
+{
+ int ret;
+
+ cdinfo(CD_OPEN, "entering cdrom_open\n");
+
+ /* if this was an O_NONBLOCK open and we should honor the flags,
+ * do a quick open without drive/disc integrity checks. */
+ cdi->use_count++;
+ if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) {
+ ret = cdi->ops->open(cdi, 1);
+ } else {
+ ret = open_for_data(cdi);
+ if (ret)
+ goto err;
+ cdrom_mmc3_profile(cdi);
+ if (mode & FMODE_WRITE) {
+ ret = -EROFS;
+ if (cdrom_open_write(cdi))
+ goto err_release;
+ if (!CDROM_CAN(CDC_RAM))
+ goto err_release;
+ ret = 0;
+ cdi->media_written = 0;
+ }
+ }
+
+ if (ret)
+ goto err;
+
+ cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
+ cdi->name, cdi->use_count);
+ /* Do this on open. Don't wait for mount, because they might
+ not be mounting, but opening with O_NONBLOCK */
+ check_disk_change(bdev);
+ return 0;
+err_release:
+ if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+ cdi->ops->lock_door(cdi, 0);
+ cdinfo(CD_OPEN, "door unlocked.\n");
+ }
+ cdi->ops->release(cdi);
+err:
+ cdi->use_count--;
+ return ret;
+}
+
+static
+int open_for_data(struct cdrom_device_info * cdi)
+{
+ int ret;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ tracktype tracks;
+ cdinfo(CD_OPEN, "entering open_for_data\n");
+ /* Check if the driver can report drive status. If it can, we
+ can do clever things. If it can't, well, we at least tried! */
+ if (cdo->drive_status != NULL) {
+ ret = cdo->drive_status(cdi, CDSL_CURRENT);
+ cdinfo(CD_OPEN, "drive_status=%d\n", ret);
+ if (ret == CDS_TRAY_OPEN) {
+ cdinfo(CD_OPEN, "the tray is open...\n");
+ /* can/may i close it? */
+ if (CDROM_CAN(CDC_CLOSE_TRAY) &&
+ cdi->options & CDO_AUTO_CLOSE) {
+ cdinfo(CD_OPEN, "trying to close the tray.\n");
+ ret=cdo->tray_move(cdi,0);
+ if (ret) {
+ cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n");
+ /* Ignore the error from the low
+ level driver. We don't care why it
+ couldn't close the tray. We only care
+ that there is no disc in the drive,
+ since that is the _REAL_ problem here.*/
+ ret=-ENOMEDIUM;
+ goto clean_up_and_return;
+ }
+ } else {
+ cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n");
+ ret=-ENOMEDIUM;
+ goto clean_up_and_return;
+ }
+ /* Ok, the door should be closed now.. Check again */
+ ret = cdo->drive_status(cdi, CDSL_CURRENT);
+ if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
+ cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
+ cdinfo(CD_OPEN, "tray might not contain a medium.\n");
+ ret=-ENOMEDIUM;
+ goto clean_up_and_return;
+ }
+ cdinfo(CD_OPEN, "the tray is now closed.\n");
+ }
+ /* the door should be closed now, check for the disc */
+ ret = cdo->drive_status(cdi, CDSL_CURRENT);
+ if (ret!=CDS_DISC_OK) {
+ ret = -ENOMEDIUM;
+ goto clean_up_and_return;
+ }
+ }
+ cdrom_count_tracks(cdi, &tracks);
+ if (tracks.error == CDS_NO_DISC) {
+ cdinfo(CD_OPEN, "bummer. no disc.\n");
+ ret=-ENOMEDIUM;
+ goto clean_up_and_return;
+ }
+ /* CD-Players which don't use O_NONBLOCK, workman
+ * for example, need bit CDO_CHECK_TYPE cleared! */
+ if (tracks.data==0) {
+ if (cdi->options & CDO_CHECK_TYPE) {
+ /* give people a warning shot, now that CDO_CHECK_TYPE
+ is the default case! */
+ cdinfo(CD_OPEN, "bummer. wrong media type.\n");
+ cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n",
+ (unsigned int)task_pid_nr(current));
+ ret=-EMEDIUMTYPE;
+ goto clean_up_and_return;
+ }
+ else {
+ cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n");
+ }
+ }
+
+ cdinfo(CD_OPEN, "all seems well, opening the device.\n");
+
+ /* all seems well, we can open the device */
+ ret = cdo->open(cdi, 0); /* open for data */
+ cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret);
+ /* After all this careful checking, we shouldn't have problems
+ opening the device, but we don't want the device locked if
+ this somehow fails... */
+ if (ret) {
+ cdinfo(CD_OPEN, "open device failed.\n");
+ goto clean_up_and_return;
+ }
+ if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) {
+ cdo->lock_door(cdi, 1);
+ cdinfo(CD_OPEN, "door locked.\n");
+ }
+ cdinfo(CD_OPEN, "device opened successfully.\n");
+ return ret;
+
+ /* Something failed. Try to unlock the drive, because some drivers
+ (notably ide-cd) lock the drive after every command. This produced
+ a nasty bug where after mount failed, the drive would remain locked!
+ This ensures that the drive gets unlocked after a mount fails. This
+ is a goto to avoid bloating the driver with redundant code. */
+clean_up_and_return:
+ cdinfo(CD_OPEN, "open failed.\n");
+ if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
+ cdo->lock_door(cdi, 0);
+ cdinfo(CD_OPEN, "door unlocked.\n");
+ }
+ return ret;
+}
+
+/* This code is similar to that in open_for_data. The routine is called
+ whenever an audio play operation is requested.
+*/
+static int check_for_audio_disc(struct cdrom_device_info * cdi,
+ struct cdrom_device_ops * cdo)
+{
+ int ret;
+ tracktype tracks;
+ cdinfo(CD_OPEN, "entering check_for_audio_disc\n");
+ if (!(cdi->options & CDO_CHECK_TYPE))
+ return 0;
+ if (cdo->drive_status != NULL) {
+ ret = cdo->drive_status(cdi, CDSL_CURRENT);
+ cdinfo(CD_OPEN, "drive_status=%d\n", ret);
+ if (ret == CDS_TRAY_OPEN) {
+ cdinfo(CD_OPEN, "the tray is open...\n");
+ /* can/may i close it? */
+ if (CDROM_CAN(CDC_CLOSE_TRAY) &&
+ cdi->options & CDO_AUTO_CLOSE) {
+ cdinfo(CD_OPEN, "trying to close the tray.\n");
+ ret=cdo->tray_move(cdi,0);
+ if (ret) {
+ cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n");
+ /* Ignore the error from the low
+ level driver. We don't care why it
+ couldn't close the tray. We only care
+ that there is no disc in the drive,
+ since that is the _REAL_ problem here.*/
+ return -ENOMEDIUM;
+ }
+ } else {
+ cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n");
+ return -ENOMEDIUM;
+ }
+ /* Ok, the door should be closed now.. Check again */
+ ret = cdo->drive_status(cdi, CDSL_CURRENT);
+ if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) {
+ cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n");
+ return -ENOMEDIUM;
+ }
+ if (ret!=CDS_DISC_OK) {
+ cdinfo(CD_OPEN, "bummer. disc isn't ready.\n");
+ return -EIO;
+ }
+ cdinfo(CD_OPEN, "the tray is now closed.\n");
+ }
+ }
+ cdrom_count_tracks(cdi, &tracks);
+ if (tracks.error)
+ return(tracks.error);
+
+ if (tracks.audio==0)
+ return -EMEDIUMTYPE;
+
+ return 0;
+}
+
+void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ int opened_for_data;
+
+ cdinfo(CD_CLOSE, "entering cdrom_release\n");
+
+ if (cdi->use_count > 0)
+ cdi->use_count--;
+
+ if (cdi->use_count == 0) {
+ cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
+ cdrom_dvd_rw_close_write(cdi);
+
+ if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ cdinfo(CD_CLOSE, "Unlocking door!\n");
+ cdo->lock_door(cdi, 0);
+ }
+ }
+
+ opened_for_data = !(cdi->options & CDO_USE_FFLAGS) ||
+ !(mode & FMODE_NDELAY);
+
+ /*
+ * flush cache on last write release
+ */
+ if (CDROM_CAN(CDC_RAM) && !cdi->use_count && cdi->for_data)
+ cdrom_close_write(cdi);
+
+ cdo->release(cdi);
+ if (cdi->use_count == 0) { /* last process that closes dev*/
+ if (opened_for_data &&
+ cdi->options & CDO_AUTO_EJECT && CDROM_CAN(CDC_OPEN_TRAY))
+ cdo->tray_move(cdi, 1);
+ }
+}
+
+static int cdrom_read_mech_status(struct cdrom_device_info *cdi,
+ struct cdrom_changer_info *buf)
+{
+ struct packet_command cgc;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ int length;
+
+ /*
+ * The Sanyo changer isn't spec compliant (it doesn't use the regular
+ * LOAD_UNLOAD command, and it doesn't implement the mech status
+ * command below).
+ */
+ if (cdi->sanyo_slot) {
+ buf->hdr.nslots = 3;
+ buf->hdr.curslot = cdi->sanyo_slot == 3 ? 0 : cdi->sanyo_slot;
+ for (length = 0; length < 3; length++) {
+ buf->slots[length].disc_present = 1;
+ buf->slots[length].change = 0;
+ }
+ return 0;
+ }
+
+ length = sizeof(struct cdrom_mechstat_header) +
+ cdi->capacity * sizeof(struct cdrom_slot);
+
+ init_cdrom_command(&cgc, buf, length, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_MECHANISM_STATUS;
+ cgc.cmd[8] = (length >> 8) & 0xff;
+ cgc.cmd[9] = length & 0xff;
+ return cdo->generic_packet(cdi, &cgc);
+}
+
+static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot)
+{
+ struct cdrom_changer_info *info;
+ int ret;
+
+ cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n");
+ if (cdi->sanyo_slot)
+ return CDS_NO_INFO;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if ((ret = cdrom_read_mech_status(cdi, info)))
+ goto out_free;
+
+ if (info->slots[slot].disc_present)
+ ret = CDS_DISC_OK;
+ else
+ ret = CDS_NO_DISC;
+
+out_free:
+ kfree(info);
+ return ret;
+}
+
+/* Return the number of slots for an ATAPI/SCSI cdrom,
+ * return 1 if not a changer.
+ */
+int cdrom_number_of_slots(struct cdrom_device_info *cdi)
+{
+ int status;
+ int nslots = 1;
+ struct cdrom_changer_info *info;
+
+ cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n");
+ /* cdrom_read_mech_status requires a valid value for capacity: */
+ cdi->capacity = 0;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if ((status = cdrom_read_mech_status(cdi, info)) == 0)
+ nslots = info->hdr.nslots;
+
+ kfree(info);
+ return nslots;
+}
+
+
+/* If SLOT < 0, unload the current slot. Otherwise, try to load SLOT. */
+static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot)
+{
+ struct packet_command cgc;
+
+ cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n");
+ if (cdi->sanyo_slot && slot < 0)
+ return 0;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_LOAD_UNLOAD;
+ cgc.cmd[4] = 2 + (slot >= 0);
+ cgc.cmd[8] = slot;
+ cgc.timeout = 60 * HZ;
+
+ /* The Sanyo 3 CD changer uses byte 7 of the
+ GPCMD_TEST_UNIT_READY command to switch CDs instead of
+ using the GPCMD_LOAD_UNLOAD opcode. */
+ if (cdi->sanyo_slot && -1 < slot) {
+ cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
+ cgc.cmd[7] = slot;
+ cgc.cmd[4] = cgc.cmd[8] = 0;
+ cdi->sanyo_slot = slot ? slot : 3;
+ }
+
+ return cdi->ops->generic_packet(cdi, &cgc);
+}
+
+static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
+{
+ struct cdrom_changer_info *info;
+ int curslot;
+ int ret;
+
+ cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n");
+ if (!CDROM_CAN(CDC_SELECT_DISC))
+ return -EDRIVE_CANT_DO_THIS;
+
+ (void) cdi->ops->media_changed(cdi, slot);
+
+ if (slot == CDSL_NONE) {
+ /* set media changed bits, on both queues */
+ cdi->mc_flags = 0x3;
+ return cdrom_load_unload(cdi, -1);
+ }
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if ((ret = cdrom_read_mech_status(cdi, info))) {
+ kfree(info);
+ return ret;
+ }
+
+ curslot = info->hdr.curslot;
+ kfree(info);
+
+ if (cdi->use_count > 1 || keeplocked) {
+ if (slot == CDSL_CURRENT) {
+ return curslot;
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ /* Specifying CDSL_CURRENT will attempt to load the current slot,
+ which is useful if it had been previously unloaded.
+ Whether it can or not, it returns the current slot.
+ Similarly, if slot happens to be the current one, we still
+ try and load it. */
+ if (slot == CDSL_CURRENT)
+ slot = curslot;
+
+ /* set media changed bits on both queues */
+ cdi->mc_flags = 0x3;
+ if ((ret = cdrom_load_unload(cdi, slot)))
+ return ret;
+
+ return slot;
+}
+
+/* We want to make media_changed accessible to the user through an
+ * ioctl. The main problem now is that we must double-buffer the
+ * low-level implementation, to assure that the VFS and the user both
+ * see a medium change once.
+ */
+
+static
+int media_changed(struct cdrom_device_info *cdi, int queue)
+{
+ unsigned int mask = (1 << (queue & 1));
+ int ret = !!(cdi->mc_flags & mask);
+
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return ret;
+ /* changed since last call? */
+ if (cdi->ops->media_changed(cdi, CDSL_CURRENT)) {
+ cdi->mc_flags = 0x3; /* set bit on both queues */
+ ret |= 1;
+ cdi->media_written = 0;
+ }
+ cdi->mc_flags &= ~mask; /* clear bit */
+ return ret;
+}
+
+int cdrom_media_changed(struct cdrom_device_info *cdi)
+{
+ /* This talks to the VFS, which doesn't like errors - just 1 or 0.
+ * Returning "0" is always safe (media hasn't been changed). Do that
+ * if the low-level cdrom driver doesn't support media changed. */
+ if (cdi == NULL || cdi->ops->media_changed == NULL)
+ return 0;
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return 0;
+ return media_changed(cdi, 0);
+}
+
+/* badly broken, I know. Is due for a fixup anytime. */
+static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks)
+{
+ struct cdrom_tochdr header;
+ struct cdrom_tocentry entry;
+ int ret, i;
+ tracks->data=0;
+ tracks->audio=0;
+ tracks->cdi=0;
+ tracks->xa=0;
+ tracks->error=0;
+ cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
+ /* Grab the TOC header so we can see how many tracks there are */
+ if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) {
+ if (ret == -ENOMEDIUM)
+ tracks->error = CDS_NO_DISC;
+ else
+ tracks->error = CDS_NO_INFO;
+ return;
+ }
+ /* check what type of tracks are on this disc */
+ entry.cdte_format = CDROM_MSF;
+ for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) {
+ entry.cdte_track = i;
+ if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) {
+ tracks->error=CDS_NO_INFO;
+ return;
+ }
+ if (entry.cdte_ctrl & CDROM_DATA_TRACK) {
+ if (entry.cdte_format == 0x10)
+ tracks->cdi++;
+ else if (entry.cdte_format == 0x20)
+ tracks->xa++;
+ else
+ tracks->data++;
+ } else
+ tracks->audio++;
+ cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n",
+ i, entry.cdte_format, entry.cdte_ctrl);
+ }
+ cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n",
+ header.cdth_trk1, tracks->audio, tracks->data,
+ tracks->cdi, tracks->xa);
+}
+
+/* Requests to the low-level drivers will /always/ be done in the
+ following format convention:
+
+ CDROM_LBA: all data-related requests.
+ CDROM_MSF: all audio-related requests.
+
+ However, a low-level implementation is allowed to refuse this
+ request, and return information in its own favorite format.
+
+ It doesn't make sense /at all/ to ask for a play_audio in LBA
+ format, or ask for multi-session info in MSF format. However, for
+ backward compatibility these format requests will be satisfied, but
+ the requests to the low-level drivers will be sanitized in the more
+ meaningful format indicated above.
+ */
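+
+/*
+ * Worked example of the conversion done in sanitize_format() below
+ * (75 frames per second, 2-second lead-in): MSF 00:02:00 maps to
+ * LBA 0, and LBA 16 maps back to MSF 00:02:16, following
+ * lba = frame + 75 * (second - 2 + 60 * minute).
+ */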
+
+static
+void sanitize_format(union cdrom_addr *addr,
+ u_char * curr, u_char requested)
+{
+ if (*curr == requested)
+ return; /* nothing to be done! */
+ if (requested == CDROM_LBA) {
+ addr->lba = (int) addr->msf.frame +
+ 75 * (addr->msf.second - 2 + 60 * addr->msf.minute);
+ } else { /* CDROM_MSF */
+ int lba = addr->lba;
+ addr->msf.frame = lba % 75;
+ lba /= 75;
+ lba += 2;
+ addr->msf.second = lba % 60;
+ addr->msf.minute = lba / 60;
+ }
+ *curr = requested;
+}
+
+void init_cdrom_command(struct packet_command *cgc, void *buf, int len,
+ int type)
+{
+ memset(cgc, 0, sizeof(struct packet_command));
+ if (buf)
+ memset(buf, 0, len);
+ cgc->buffer = (char *) buf;
+ cgc->buflen = len;
+ cgc->data_direction = type;
+ cgc->timeout = CDROM_DEF_TIMEOUT;
+}
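+
+#if 0
+/*
+ * Illustrative sketch only (not part of the original driver): the
+ * canonical pattern used throughout this file to issue a command --
+ * zero the cgc and buffer with init_cdrom_command(), fill in the
+ * GPCMD_* opcode, then hand it to the low-level generic_packet().
+ * The helper name is hypothetical.
+ */
+static int example_test_unit_ready(struct cdrom_device_info *cdi)
+{
+        struct packet_command cgc;
+
+        init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+        cgc.cmd[0] = GPCMD_TEST_UNIT_READY;
+        cgc.quiet = 1;
+        return cdi->ops->generic_packet(cdi, &cgc);
+}
+#endif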
+
+/* DVD handling */
+
+#define copy_key(dest,src) memcpy((dest), (src), sizeof(dvd_key))
+#define copy_chal(dest,src) memcpy((dest), (src), sizeof(dvd_challenge))
+
+static void setup_report_key(struct packet_command *cgc, unsigned agid, unsigned type)
+{
+ cgc->cmd[0] = GPCMD_REPORT_KEY;
+ cgc->cmd[10] = type | (agid << 6);
+ switch (type) {
+ case 0: case 8: case 5: {
+ cgc->buflen = 8;
+ break;
+ }
+ case 1: {
+ cgc->buflen = 16;
+ break;
+ }
+ case 2: case 4: {
+ cgc->buflen = 12;
+ break;
+ }
+ }
+ cgc->cmd[9] = cgc->buflen;
+ cgc->data_direction = CGC_DATA_READ;
+}
+
+static void setup_send_key(struct packet_command *cgc, unsigned agid, unsigned type)
+{
+ cgc->cmd[0] = GPCMD_SEND_KEY;
+ cgc->cmd[10] = type | (agid << 6);
+ switch (type) {
+ case 1: {
+ cgc->buflen = 16;
+ break;
+ }
+ case 3: {
+ cgc->buflen = 12;
+ break;
+ }
+ case 6: {
+ cgc->buflen = 8;
+ break;
+ }
+ }
+ cgc->cmd[9] = cgc->buflen;
+ cgc->data_direction = CGC_DATA_WRITE;
+}
+
+static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
+{
+ int ret;
+ u_char buf[20];
+ struct packet_command cgc;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ rpc_state_t rpc_state;
+
+ memset(buf, 0, sizeof(buf));
+ init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ);
+
+ switch (ai->type) {
+ /* LU data send */
+ case DVD_LU_SEND_AGID:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n");
+ cgc.quiet = 1;
+ setup_report_key(&cgc, ai->lsa.agid, 0);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ ai->lsa.agid = buf[7] >> 6;
+ /* Returning data, let host change state */
+ break;
+
+ case DVD_LU_SEND_KEY1:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n");
+ setup_report_key(&cgc, ai->lsk.agid, 2);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ copy_key(ai->lsk.key, &buf[4]);
+ /* Returning data, let host change state */
+ break;
+
+ case DVD_LU_SEND_CHALLENGE:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n");
+ setup_report_key(&cgc, ai->lsc.agid, 1);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ copy_chal(ai->lsc.chal, &buf[4]);
+ /* Returning data, let host change state */
+ break;
+
+ /* Post-auth key */
+ case DVD_LU_SEND_TITLE_KEY:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n");
+ cgc.quiet = 1;
+ setup_report_key(&cgc, ai->lstk.agid, 4);
+ cgc.cmd[5] = ai->lstk.lba;
+ cgc.cmd[4] = ai->lstk.lba >> 8;
+ cgc.cmd[3] = ai->lstk.lba >> 16;
+ cgc.cmd[2] = ai->lstk.lba >> 24;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ ai->lstk.cpm = (buf[4] >> 7) & 1;
+ ai->lstk.cp_sec = (buf[4] >> 6) & 1;
+ ai->lstk.cgms = (buf[4] >> 4) & 3;
+ copy_key(ai->lstk.title_key, &buf[5]);
+ /* Returning data, let host change state */
+ break;
+
+ case DVD_LU_SEND_ASF:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n");
+ setup_report_key(&cgc, ai->lsasf.agid, 5);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ ai->lsasf.asf = buf[7] & 1;
+ break;
+
+ /* LU data receive (LU changes state) */
+ case DVD_HOST_SEND_CHALLENGE:
+ cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n");
+ setup_send_key(&cgc, ai->hsc.agid, 1);
+ buf[1] = 0xe;
+ copy_chal(&buf[4], ai->hsc.chal);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ ai->type = DVD_LU_SEND_KEY1;
+ break;
+
+ case DVD_HOST_SEND_KEY2:
+ cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n");
+ setup_send_key(&cgc, ai->hsk.agid, 3);
+ buf[1] = 0xa;
+ copy_key(&buf[4], ai->hsk.key);
+
+ if ((ret = cdo->generic_packet(cdi, &cgc))) {
+ ai->type = DVD_AUTH_FAILURE;
+ return ret;
+ }
+ ai->type = DVD_AUTH_ESTABLISHED;
+ break;
+
+ /* Misc */
+ case DVD_INVALIDATE_AGID:
+ cgc.quiet = 1;
+ cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n");
+ setup_report_key(&cgc, ai->lsa.agid, 0x3f);
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+ break;
+
+ /* Get region settings */
+ case DVD_LU_SEND_RPC_STATE:
+ cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n");
+ setup_report_key(&cgc, 0, 8);
+ memset(&rpc_state, 0, sizeof(rpc_state_t));
+ cgc.buffer = (char *) &rpc_state;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ ai->lrpcs.type = rpc_state.type_code;
+ ai->lrpcs.vra = rpc_state.vra;
+ ai->lrpcs.ucca = rpc_state.ucca;
+ ai->lrpcs.region_mask = rpc_state.region_mask;
+ ai->lrpcs.rpc_scheme = rpc_state.rpc_scheme;
+ break;
+
+ /* Set region settings */
+ case DVD_HOST_SEND_RPC_STATE:
+ cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n");
+ setup_send_key(&cgc, 0, 6);
+ buf[1] = 6;
+ buf[4] = ai->hrpcs.pdrc;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+ break;
+
+ default:
+ cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type);
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ unsigned char buf[21], *base;
+ struct dvd_layer *layer;
+ struct cdrom_device_ops *cdo = cdi->ops;
+ int ret, layer_num = s->physical.layer_num;
+
+ if (layer_num >= DVD_LAYERS)
+ return -EINVAL;
+
+ init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[6] = layer_num;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[9] = cgc->buflen & 0xff;
+
+ /*
+ * refrain from reporting errors on non-existing layers (mainly)
+ */
+ cgc->quiet = 1;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ return ret;
+
+ base = &buf[4];
+ layer = &s->physical.layer[layer_num];
+
+ /*
+ * place the data... really ugly, but at least we won't have to
+ * worry about endianness in userspace.
+ */
+ memset(layer, 0, sizeof(*layer));
+ layer->book_version = base[0] & 0xf;
+ layer->book_type = base[0] >> 4;
+ layer->min_rate = base[1] & 0xf;
+ layer->disc_size = base[1] >> 4;
+ layer->layer_type = base[2] & 0xf;
+ layer->track_path = (base[2] >> 4) & 1;
+ layer->nlayers = (base[2] >> 5) & 3;
+ layer->track_density = base[3] & 0xf;
+ layer->linear_density = base[3] >> 4;
+ layer->start_sector = base[5] << 16 | base[6] << 8 | base[7];
+ layer->end_sector = base[9] << 16 | base[10] << 8 | base[11];
+ layer->end_sector_l0 = base[13] << 16 | base[14] << 8 | base[15];
+ layer->bca = base[16] >> 7;
+
+ return 0;
+}
+
+static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ int ret;
+ u_char buf[8];
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[6] = s->copyright.layer_num;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = cgc->buflen >> 8;
+ cgc->cmd[9] = cgc->buflen & 0xff;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ return ret;
+
+ s->copyright.cpst = buf[4];
+ s->copyright.rmi = buf[5];
+
+ return 0;
+}
+
+static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ int ret, size;
+ u_char *buf;
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ size = sizeof(s->disckey.value) + 4;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = size >> 8;
+ cgc->cmd[9] = size & 0xff;
+ cgc->cmd[10] = s->disckey.agid << 6;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (!ret)
+ memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
+
+ kfree(buf);
+ return ret;
+}
+
+static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ int ret, size = 4 + 188;
+ u_char *buf;
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[9] = cgc->buflen & 0xff;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ goto out;
+
+ s->bca.len = buf[0] << 8 | buf[1];
+ if (s->bca.len < 12 || s->bca.len > 188) {
+ cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
+ ret = -EIO;
+ goto out;
+ }
+ memcpy(s->bca.value, &buf[4], s->bca.len);
+ ret = 0;
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ int ret = 0, size;
+ u_char *buf;
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ size = sizeof(s->manufact.value) + 4;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
+ cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
+ cgc->cmd[7] = s->type;
+ cgc->cmd[8] = size >> 8;
+ cgc->cmd[9] = size & 0xff;
+
+ ret = cdo->generic_packet(cdi, cgc);
+ if (ret)
+ goto out;
+
+ s->manufact.len = buf[0] << 8 | buf[1];
+ if (s->manufact.len < 0 || s->manufact.len > 2048) {
+		cdinfo(CD_WARNING, "Received invalid manufacturer info length"
+ " (%d)\n", s->manufact.len);
+ ret = -EIO;
+ } else {
+ memcpy(s->manufact.value, &buf[4], s->manufact.len);
+ }
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
+ struct packet_command *cgc)
+{
+ switch (s->type) {
+ case DVD_STRUCT_PHYSICAL:
+ return dvd_read_physical(cdi, s, cgc);
+
+ case DVD_STRUCT_COPYRIGHT:
+ return dvd_read_copyright(cdi, s, cgc);
+
+ case DVD_STRUCT_DISCKEY:
+ return dvd_read_disckey(cdi, s, cgc);
+
+ case DVD_STRUCT_BCA:
+ return dvd_read_bca(cdi, s, cgc);
+
+ case DVD_STRUCT_MANUFACT:
+ return dvd_read_manufact(cdi, s, cgc);
+
+ default:
+ cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
+ s->type);
+ return -EINVAL;
+ }
+}
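+
+/*
+ * All of the dvd_read_* helpers above build the same GPCMD_READ_DVD_STRUCTURE
+ * packet: cmd[6] selects the layer (where relevant), cmd[7] the structure
+ * format (s->type), cmd[8..9] the allocation length and, for the disc key,
+ * cmd[10] the AGID; they differ only in how the returned buffer is unpacked.
+ */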
+
+int cdrom_mode_sense(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int page_code, int page_control)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+ cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+ cgc->cmd[2] = page_code | (page_control << 6);
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_READ;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+int cdrom_mode_select(struct cdrom_device_info *cdi,
+ struct packet_command *cgc)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ memset(cgc->cmd, 0, sizeof(cgc->cmd));
+ memset(cgc->buffer, 0, 2);
+ cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+ cgc->cmd[1] = 0x10; /* PF */
+ cgc->cmd[7] = cgc->buflen >> 8;
+ cgc->cmd[8] = cgc->buflen & 0xff;
+ cgc->data_direction = CGC_DATA_WRITE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static int cdrom_read_subchannel(struct cdrom_device_info *cdi,
+ struct cdrom_subchnl *subchnl, int mcn)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct packet_command cgc;
+ char buffer[32];
+ int ret;
+
+ init_cdrom_command(&cgc, buffer, 16, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_SUBCHANNEL;
+ cgc.cmd[1] = 2; /* MSF addressing */
+ cgc.cmd[2] = 0x40; /* request subQ data */
+ cgc.cmd[3] = mcn ? 2 : 1;
+ cgc.cmd[8] = 16;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ subchnl->cdsc_audiostatus = cgc.buffer[1];
+ subchnl->cdsc_format = CDROM_MSF;
+ subchnl->cdsc_ctrl = cgc.buffer[5] & 0xf;
+ subchnl->cdsc_trk = cgc.buffer[6];
+ subchnl->cdsc_ind = cgc.buffer[7];
+
+ subchnl->cdsc_reladdr.msf.minute = cgc.buffer[13];
+ subchnl->cdsc_reladdr.msf.second = cgc.buffer[14];
+ subchnl->cdsc_reladdr.msf.frame = cgc.buffer[15];
+ subchnl->cdsc_absaddr.msf.minute = cgc.buffer[9];
+ subchnl->cdsc_absaddr.msf.second = cgc.buffer[10];
+ subchnl->cdsc_absaddr.msf.frame = cgc.buffer[11];
+
+ return 0;
+}
+
+/*
+ * Specific READ_10 interface
+ */
+static int cdrom_read_cd(struct cdrom_device_info *cdi,
+ struct packet_command *cgc, int lba,
+ int blocksize, int nblocks)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ memset(&cgc->cmd, 0, sizeof(cgc->cmd));
+ cgc->cmd[0] = GPCMD_READ_10;
+ cgc->cmd[2] = (lba >> 24) & 0xff;
+ cgc->cmd[3] = (lba >> 16) & 0xff;
+ cgc->cmd[4] = (lba >> 8) & 0xff;
+ cgc->cmd[5] = lba & 0xff;
+ cgc->cmd[6] = (nblocks >> 16) & 0xff;
+ cgc->cmd[7] = (nblocks >> 8) & 0xff;
+ cgc->cmd[8] = nblocks & 0xff;
+ cgc->buflen = blocksize * nblocks;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+/* very generic interface for reading the various types of blocks */
+static int cdrom_read_block(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int lba, int nblocks, int format, int blksize)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+
+ memset(&cgc->cmd, 0, sizeof(cgc->cmd));
+ cgc->cmd[0] = GPCMD_READ_CD;
+ /* expected sector size - cdda,mode1,etc. */
+ cgc->cmd[1] = format << 2;
+ /* starting address */
+ cgc->cmd[2] = (lba >> 24) & 0xff;
+ cgc->cmd[3] = (lba >> 16) & 0xff;
+ cgc->cmd[4] = (lba >> 8) & 0xff;
+ cgc->cmd[5] = lba & 0xff;
+ /* number of blocks */
+ cgc->cmd[6] = (nblocks >> 16) & 0xff;
+ cgc->cmd[7] = (nblocks >> 8) & 0xff;
+ cgc->cmd[8] = nblocks & 0xff;
+ cgc->buflen = blksize * nblocks;
+
+ /* set the header info returned */
+ switch (blksize) {
+ case CD_FRAMESIZE_RAW0 : cgc->cmd[9] = 0x58; break;
+ case CD_FRAMESIZE_RAW1 : cgc->cmd[9] = 0x78; break;
+ case CD_FRAMESIZE_RAW : cgc->cmd[9] = 0xf8; break;
+ default : cgc->cmd[9] = 0x10;
+ }
+
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ int lba, int nframes)
+{
+ struct packet_command cgc;
+ int ret = 0;
+ int nr;
+
+ cdi->last_sense = 0;
+
+ memset(&cgc, 0, sizeof(cgc));
+
+	/*
+	 * start with a buffer big enough for all nframes; back the size
+	 * down if the allocation fails
+	 */
+ nr = nframes;
+ do {
+ cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
+ if (cgc.buffer)
+ break;
+
+ nr >>= 1;
+ } while (nr);
+
+ if (!nr)
+ return -ENOMEM;
+
+ if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ cgc.data_direction = CGC_DATA_READ;
+ while (nframes > 0) {
+ if (nr > nframes)
+ nr = nframes;
+
+ ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
+ if (ret)
+ break;
+ if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+ ret = -EFAULT;
+ break;
+ }
+ ubuf += CD_FRAMESIZE_RAW * nr;
+ nframes -= nr;
+ lba += nr;
+ }
+out:
+ kfree(cgc.buffer);
+ return ret;
+}
+
+static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ int lba, int nframes)
+{
+ struct request_queue *q = cdi->disk->queue;
+ struct request *rq;
+ struct bio *bio;
+ unsigned int len;
+ int nr, ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ cdi->last_sense = 0;
+
+ while (nframes) {
+ nr = nframes;
+ if (cdi->cdda_method == CDDA_BPC_SINGLE)
+ nr = 1;
+ if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
+ nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+
+ len = nr * CD_FRAMESIZE_RAW;
+
+ rq = blk_get_request(q, READ, GFP_KERNEL);
+ if (!rq) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
+ if (ret) {
+ blk_put_request(rq);
+ break;
+ }
+
+ rq->cmd[0] = GPCMD_READ_CD;
+ rq->cmd[1] = 1 << 2;
+ rq->cmd[2] = (lba >> 24) & 0xff;
+ rq->cmd[3] = (lba >> 16) & 0xff;
+ rq->cmd[4] = (lba >> 8) & 0xff;
+ rq->cmd[5] = lba & 0xff;
+ rq->cmd[6] = (nr >> 16) & 0xff;
+ rq->cmd[7] = (nr >> 8) & 0xff;
+ rq->cmd[8] = nr & 0xff;
+ rq->cmd[9] = 0xf8;
+
+ rq->cmd_len = 12;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->timeout = 60 * HZ;
+ bio = rq->bio;
+
+ if (blk_execute_rq(q, cdi->disk, rq, 0)) {
+ struct request_sense *s = rq->sense;
+ ret = -EIO;
+ cdi->last_sense = s->sense_key;
+ }
+
+ if (blk_rq_unmap_user(bio))
+ ret = -EFAULT;
+ blk_put_request(rq);
+
+ if (ret)
+ break;
+
+ nframes -= nr;
+ lba += nr;
+ ubuf += len;
+ }
+
+ return ret;
+}
+
+static int cdrom_read_cdda(struct cdrom_device_info *cdi, __u8 __user *ubuf,
+ int lba, int nframes)
+{
+ int ret;
+
+ if (cdi->cdda_method == CDDA_OLD)
+ return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
+
+retry:
+	/*
+	 * only an io error is worth retrying below; success and any
+	 * other error are returned to the caller as-is
+	 */
+ ret = cdrom_read_cdda_bpc(cdi, ubuf, lba, nframes);
+ if (!ret || ret != -EIO)
+ return ret;
+
+ /*
+ * I've seen drives get sense 4/8/3 udma crc errors on multi
+ * frame dma, so drop to single frame dma if we need to
+ */
+ if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) {
+ printk("cdrom: dropping to single frame dma\n");
+ cdi->cdda_method = CDDA_BPC_SINGLE;
+ goto retry;
+ }
+
+	/*
+	 * so we have an io error of some sort with multi frame dma. only
+	 * fall back to the old-style interface for hardware problems
+	 * (sense 0x04/0x0b), not for any other error
+	 */
+ if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b)
+ return ret;
+
+ printk("cdrom: dropping to old style cdda (sense=%x)\n", cdi->last_sense);
+ cdi->cdda_method = CDDA_OLD;
+ return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);
+}
+
+static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_multisession ms_info;
+ u8 requested_format;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
+
+ if (!(cdi->ops->capability & CDC_MULTI_SESSION))
+ return -ENOSYS;
+
+ if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
+ return -EFAULT;
+
+ requested_format = ms_info.addr_format;
+ if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+ return -EINVAL;
+ ms_info.addr_format = CDROM_LBA;
+
+ ret = cdi->ops->get_last_session(cdi, &ms_info);
+ if (ret)
+ return ret;
+
+ sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
+
+ if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
+ return -EFAULT;
+
+ cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
+ return 0;
+}
+
+static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
+
+ if (!CDROM_CAN(CDC_OPEN_TRAY))
+ return -ENOSYS;
+ if (cdi->use_count != 1 || keeplocked)
+ return -EBUSY;
+ if (CDROM_CAN(CDC_LOCK)) {
+ int ret = cdi->ops->lock_door(cdi, 0);
+ if (ret)
+ return ret;
+ }
+
+ return cdi->ops->tray_move(cdi, 1);
+}
+
+static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
+
+ if (!CDROM_CAN(CDC_CLOSE_TRAY))
+ return -ENOSYS;
+ return cdi->ops->tray_move(cdi, 0);
+}
+
+static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
+
+ if (!CDROM_CAN(CDC_OPEN_TRAY))
+ return -ENOSYS;
+ if (keeplocked)
+ return -EBUSY;
+
+ cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
+ if (arg)
+ cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
+ return 0;
+}
+
+static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ struct cdrom_changer_info *info;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
+
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return -ENOSYS;
+
+ /* cannot select disc or select current disc */
+ if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+ return media_changed(cdi, 1);
+
+ if ((unsigned int)arg >= cdi->capacity)
+ return -EINVAL;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ret = cdrom_read_mech_status(cdi, info);
+ if (!ret)
+ ret = info->slots[arg].change;
+ kfree(info);
+ return ret;
+}
+
+static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
+
+ /*
+ * Options need to be in sync with capability.
+ * Too late for that, so we have to check each one separately.
+ */
+ switch (arg) {
+ case CDO_USE_FFLAGS:
+ case CDO_CHECK_TYPE:
+ break;
+ case CDO_LOCK:
+ if (!CDROM_CAN(CDC_LOCK))
+ return -ENOSYS;
+ break;
+ case 0:
+ return cdi->options;
+ /* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
+ default:
+ if (!CDROM_CAN(arg))
+ return -ENOSYS;
+ }
+ cdi->options |= (int) arg;
+ return cdi->options;
+}
+
+static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
+
+ cdi->options &= ~(int) arg;
+ return cdi->options;
+}
+
+static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
+
+ if (!CDROM_CAN(CDC_SELECT_SPEED))
+ return -ENOSYS;
+ return cdi->ops->select_speed(cdi, arg);
+}
+
+static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
+
+ if (!CDROM_CAN(CDC_SELECT_DISC))
+ return -ENOSYS;
+
+ if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
+ if ((int)arg >= cdi->capacity)
+ return -EINVAL;
+ }
+
+ /*
+ * ->select_disc is a hook to allow a driver-specific way of
+	 * selecting a disc. However, since there is no equivalent hook for
+ * cdrom_slot_status this may not actually be useful...
+ */
+ if (cdi->ops->select_disc)
+ return cdi->ops->select_disc(cdi, arg);
+
+ cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
+ return cdrom_select_disc(cdi, arg);
+}
+
+static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
+ struct block_device *bdev)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!CDROM_CAN(CDC_RESET))
+ return -ENOSYS;
+ invalidate_bdev(bdev);
+ return cdi->ops->reset(cdi);
+}
+
+static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
+
+ if (!CDROM_CAN(CDC_LOCK))
+ return -EDRIVE_CANT_DO_THIS;
+
+ keeplocked = arg ? 1 : 0;
+
+ /*
+ * Don't unlock the door on multiple opens by default, but allow
+ * root to do so.
+ */
+ if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+ return cdi->ops->lock_door(cdi, arg);
+}
+
+static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ debug = arg ? 1 : 0;
+ return debug;
+}
+
+static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
+ return (cdi->ops->capability & ~cdi->mask);
+}
+
+/*
+ * The following function is implemented, although very few audio
+ * discs give Universal Product Code information, which should just be
+ * the Medium Catalog Number on the box. Note that the way the code
+ * is written on the CD is /not/ uniform across all discs!
+ */
+static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_mcn mcn;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
+
+ if (!(cdi->ops->capability & CDC_MCN))
+ return -ENOSYS;
+ ret = cdi->ops->get_mcn(cdi, &mcn);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &mcn, sizeof(mcn)))
+ return -EFAULT;
+ cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
+ return 0;
+}
+
+static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
+
+ if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
+ return -ENOSYS;
+ if (!CDROM_CAN(CDC_SELECT_DISC) ||
+ (arg == CDSL_CURRENT || arg == CDSL_NONE))
+ return cdi->ops->drive_status(cdi, CDSL_CURRENT);
+ if (((int)arg >= cdi->capacity))
+ return -EINVAL;
+ return cdrom_slot_status(cdi, arg);
+}
+
+/*
+ * Ok, this is where problems start. The current interface for the
+ * CDROM_DISC_STATUS ioctl is flawed. It makes the false assumption that
+ * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc. Unfortunately, while this
+ * is often the case, it is also very common for CDs to have some tracks
+ * with data, and some tracks with audio. Just because I feel like it,
+ * I declare the following to be the best way to cope. If the CD has ANY
+ * data tracks on it, it will be returned as a data CD. If it has any XA
+ * tracks, I will return it as that. Now I could simplify this interface
+ * by combining these returns with the above, but this more clearly
+ * demonstrates the problem with the current interface. Too bad this
+ * wasn't designed to use bitmasks... -Erik
+ *
+ * Well, now we have the option CDS_MIXED: a mixed-type CD.
+ * User level programmers might feel the ioctl is not very useful.
+ * ---david
+ */
+static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
+{
+ tracktype tracks;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
+
+ cdrom_count_tracks(cdi, &tracks);
+ if (tracks.error)
+ return tracks.error;
+
+ /* Policy mode on */
+ if (tracks.audio > 0) {
+ if (!tracks.data && !tracks.cdi && !tracks.xa)
+ return CDS_AUDIO;
+ else
+ return CDS_MIXED;
+ }
+
+ if (tracks.cdi > 0)
+ return CDS_XA_2_2;
+ if (tracks.xa > 0)
+ return CDS_XA_2_1;
+ if (tracks.data > 0)
+ return CDS_DATA_1;
+ /* Policy mode off */
+
+ cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
+ return CDS_NO_INFO;
+}
+
+static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
+{
+ cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
+ return cdi->capacity;
+}
+
+static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_subchnl q;
+ u8 requested, back;
+ int ret;
+
+ /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
+
+ if (copy_from_user(&q, argp, sizeof(q)))
+ return -EFAULT;
+
+ requested = q.cdsc_format;
+ if (requested != CDROM_MSF && requested != CDROM_LBA)
+ return -EINVAL;
+ q.cdsc_format = CDROM_MSF;
+
+ ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
+ if (ret)
+ return ret;
+
+ back = q.cdsc_format; /* local copy */
+ sanitize_format(&q.cdsc_absaddr, &back, requested);
+ sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
+
+ if (copy_to_user(argp, &q, sizeof(q)))
+ return -EFAULT;
+ /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+ return 0;
+}
+
+static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_tochdr header;
+ int ret;
+
+ /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
+
+ if (copy_from_user(&header, argp, sizeof(header)))
+ return -EFAULT;
+
+ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &header, sizeof(header)))
+ return -EFAULT;
+ /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
+ return 0;
+}
+
+static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_tocentry entry;
+ u8 requested_format;
+ int ret;
+
+ /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
+
+ if (copy_from_user(&entry, argp, sizeof(entry)))
+ return -EFAULT;
+
+ requested_format = entry.cdte_format;
+ if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+ return -EINVAL;
+ /* make interface to low-level uniform */
+ entry.cdte_format = CDROM_MSF;
+ ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
+ if (ret)
+ return ret;
+ sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
+
+ if (copy_to_user(argp, &entry, sizeof(entry)))
+ return -EFAULT;
+ /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
+ return 0;
+}
+
+static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_msf msf;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+ if (copy_from_user(&msf, argp, sizeof(msf)))
+ return -EFAULT;
+ return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf);
+}
+
+static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_ti ti;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+ if (copy_from_user(&ti, argp, sizeof(ti)))
+ return -EFAULT;
+
+ ret = check_for_audio_disc(cdi, cdi->ops);
+ if (ret)
+ return ret;
+ return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
+}
+static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_volctrl volume;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+ if (copy_from_user(&volume, argp, sizeof(volume)))
+ return -EFAULT;
+ return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
+}
+
+static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
+ void __user *argp)
+{
+ struct cdrom_volctrl volume;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+
+ ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(argp, &volume, sizeof(volume)))
+ return -EFAULT;
+ return 0;
+}
+
+static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
+ unsigned int cmd)
+{
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
+
+ if (!CDROM_CAN(CDC_PLAY_AUDIO))
+ return -ENOSYS;
+ ret = check_for_audio_disc(cdi, cdi->ops);
+ if (ret)
+ return ret;
+ return cdi->ops->audio_ioctl(cdi, cmd, NULL);
+}
+
+/*
+ * Just about every imaginable ioctl is supported in the Uniform layer
+ * these days.
+ * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
+ */
+int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
+ fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int ret;
+ struct gendisk *disk = bdev->bd_disk;
+
+ /*
+	 * Try the generic SCSI command ioctls first.
+ */
+ ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+ if (ret != -ENOTTY)
+ return ret;
+
+ switch (cmd) {
+ case CDROMMULTISESSION:
+ return cdrom_ioctl_multisession(cdi, argp);
+ case CDROMEJECT:
+ return cdrom_ioctl_eject(cdi);
+ case CDROMCLOSETRAY:
+ return cdrom_ioctl_closetray(cdi);
+ case CDROMEJECT_SW:
+ return cdrom_ioctl_eject_sw(cdi, arg);
+ case CDROM_MEDIA_CHANGED:
+ return cdrom_ioctl_media_changed(cdi, arg);
+ case CDROM_SET_OPTIONS:
+ return cdrom_ioctl_set_options(cdi, arg);
+ case CDROM_CLEAR_OPTIONS:
+ return cdrom_ioctl_clear_options(cdi, arg);
+ case CDROM_SELECT_SPEED:
+ return cdrom_ioctl_select_speed(cdi, arg);
+ case CDROM_SELECT_DISC:
+ return cdrom_ioctl_select_disc(cdi, arg);
+ case CDROMRESET:
+ return cdrom_ioctl_reset(cdi, bdev);
+ case CDROM_LOCKDOOR:
+ return cdrom_ioctl_lock_door(cdi, arg);
+ case CDROM_DEBUG:
+ return cdrom_ioctl_debug(cdi, arg);
+ case CDROM_GET_CAPABILITY:
+ return cdrom_ioctl_get_capability(cdi);
+ case CDROM_GET_MCN:
+ return cdrom_ioctl_get_mcn(cdi, argp);
+ case CDROM_DRIVE_STATUS:
+ return cdrom_ioctl_drive_status(cdi, arg);
+ case CDROM_DISC_STATUS:
+ return cdrom_ioctl_disc_status(cdi);
+ case CDROM_CHANGER_NSLOTS:
+ return cdrom_ioctl_changer_nslots(cdi);
+ }
+
+ /*
+ * Use the ioctls that are implemented through the generic_packet()
+	 * interface. This may look a bit funny, but if -ENOTTY is
+	 * returned, that particular ioctl is not implemented and we
+	 * let it fall through to the device-specific ones.
+ */
+ if (CDROM_CAN(CDC_GENERIC_PACKET)) {
+ ret = mmc_ioctl(cdi, cmd, arg);
+ if (ret != -ENOTTY)
+ return ret;
+ }
+
+ /*
+ * Note: most of the cdinfo() calls are commented out here,
+ * because they fill up the sys log when CD players poll
+ * the drive.
+ */
+ switch (cmd) {
+ case CDROMSUBCHNL:
+ return cdrom_ioctl_get_subchnl(cdi, argp);
+ case CDROMREADTOCHDR:
+ return cdrom_ioctl_read_tochdr(cdi, argp);
+ case CDROMREADTOCENTRY:
+ return cdrom_ioctl_read_tocentry(cdi, argp);
+ case CDROMPLAYMSF:
+ return cdrom_ioctl_play_msf(cdi, argp);
+ case CDROMPLAYTRKIND:
+ return cdrom_ioctl_play_trkind(cdi, argp);
+ case CDROMVOLCTRL:
+ return cdrom_ioctl_volctrl(cdi, argp);
+ case CDROMVOLREAD:
+ return cdrom_ioctl_volread(cdi, argp);
+ case CDROMSTART:
+ case CDROMSTOP:
+ case CDROMPAUSE:
+ case CDROMRESUME:
+ return cdrom_ioctl_audioctl(cdi, cmd);
+ }
+
+ return -ENOSYS;
+}
+
+/*
+ * Required when we need READ_10 to issue reads with a block size other
+ * than 2048 bytes
+ */
+static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct packet_command cgc;
+ struct modesel_head mh;
+
+ memset(&mh, 0, sizeof(mh));
+ mh.block_desc_length = 0x08;
+ mh.block_length_med = (size >> 8) & 0xff;
+ mh.block_length_lo = size & 0xff;
+
+ memset(&cgc, 0, sizeof(cgc));
+ cgc.cmd[0] = 0x15;
+ cgc.cmd[1] = 1 << 4;
+ cgc.cmd[4] = 12;
+ cgc.buflen = sizeof(mh);
+ cgc.buffer = (char *) &mh;
+ cgc.data_direction = CGC_DATA_WRITE;
+ mh.block_desc_length = 0x08;
+ mh.block_length_med = (size >> 8) & 0xff;
+ mh.block_length_lo = size & 0xff;
+
+ return cdo->generic_packet(cdi, &cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc,
+ int cmd)
+{
+ struct request_sense sense;
+ struct cdrom_msf msf;
+ int blocksize = 0, format = 0, lba;
+ int ret;
+
+ switch (cmd) {
+ case CDROMREADRAW:
+ blocksize = CD_FRAMESIZE_RAW;
+ break;
+ case CDROMREADMODE1:
+ blocksize = CD_FRAMESIZE;
+ format = 2;
+ break;
+ case CDROMREADMODE2:
+ blocksize = CD_FRAMESIZE_RAW0;
+ break;
+ }
+ IOCTL_IN(arg, struct cdrom_msf, msf);
+ lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
+ /* FIXME: we need upper bound checking, too!! */
+ if (lba < 0)
+ return -EINVAL;
+
+ cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
+ if (cgc->buffer == NULL)
+ return -ENOMEM;
+
+ memset(&sense, 0, sizeof(sense));
+ cgc->sense = &sense;
+ cgc->data_direction = CGC_DATA_READ;
+ ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
+ if (ret && sense.sense_key == 0x05 &&
+ sense.asc == 0x20 &&
+ sense.ascq == 0x00) {
+ /*
+ * SCSI-II devices are not required to support
+ * READ_CD, so let's try switching block size
+ */
+ /* FIXME: switch back again... */
+ ret = cdrom_switch_blocksize(cdi, blocksize);
+ if (ret)
+ goto out;
+ cgc->sense = NULL;
+ ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
+ ret |= cdrom_switch_blocksize(cdi, blocksize);
+ }
+ if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
+ ret = -EFAULT;
+out:
+ kfree(cgc->buffer);
+ return ret;
+}
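+
+/*
+ * For reference: msf_to_lba() as used above and in the CDROMREADAUDIO
+ * handler below follows the usual Red Book arithmetic of 75 frames per
+ * second and 60 seconds per minute, minus the 150-frame (two second)
+ * lead-in, i.e. roughly
+ *
+ *	lba = (minute * 60 + second) * 75 + frame - CD_MSF_OFFSET;
+ *
+ * so an MSF address below 00:02:00 yields a negative LBA and trips the
+ * lba < 0 checks.
+ */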
+
+static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ struct cdrom_read_audio ra;
+ int lba;
+
+ IOCTL_IN(arg, struct cdrom_read_audio, ra);
+
+ if (ra.addr_format == CDROM_MSF)
+ lba = msf_to_lba(ra.addr.msf.minute,
+ ra.addr.msf.second,
+ ra.addr.msf.frame);
+ else if (ra.addr_format == CDROM_LBA)
+ lba = ra.addr.lba;
+ else
+ return -EINVAL;
+
+ /* FIXME: we need upper bound checking, too!! */
+ if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
+ return -EINVAL;
+
+ return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
+}
+
+static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ struct cdrom_subchnl q;
+ u_char requested, back;
+ IOCTL_IN(arg, struct cdrom_subchnl, q);
+ requested = q.cdsc_format;
+ if (!((requested == CDROM_MSF) ||
+ (requested == CDROM_LBA)))
+ return -EINVAL;
+ q.cdsc_format = CDROM_MSF;
+ ret = cdrom_read_subchannel(cdi, &q, 0);
+ if (ret)
+ return ret;
+ back = q.cdsc_format; /* local copy */
+ sanitize_format(&q.cdsc_absaddr, &back, requested);
+ sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
+ IOCTL_OUT(arg, struct cdrom_subchnl, q);
+ /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+ return 0;
+}
+
+static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct cdrom_msf msf;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+ IOCTL_IN(arg, struct cdrom_msf, msf);
+ cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+ cgc->cmd[3] = msf.cdmsf_min0;
+ cgc->cmd[4] = msf.cdmsf_sec0;
+ cgc->cmd[5] = msf.cdmsf_frame0;
+ cgc->cmd[6] = msf.cdmsf_min1;
+ cgc->cmd[7] = msf.cdmsf_sec1;
+ cgc->cmd[8] = msf.cdmsf_frame1;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct cdrom_blk blk;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
+ IOCTL_IN(arg, struct cdrom_blk, blk);
+ cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
+ cgc->cmd[2] = (blk.from >> 24) & 0xff;
+ cgc->cmd[3] = (blk.from >> 16) & 0xff;
+ cgc->cmd[4] = (blk.from >> 8) & 0xff;
+ cgc->cmd[5] = blk.from & 0xff;
+ cgc->cmd[7] = (blk.len >> 8) & 0xff;
+ cgc->cmd[8] = blk.len & 0xff;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc,
+ unsigned int cmd)
+{
+ struct cdrom_volctrl volctrl;
+ unsigned char buffer[32];
+ char mask[sizeof(buffer)];
+ unsigned short offset;
+ int ret;
+
+ cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
+
+ IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
+
+ cgc->buffer = buffer;
+ cgc->buflen = 24;
+ ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
+ if (ret)
+ return ret;
+
+	/* originally the code depended on buffer[1] to determine
+	   how much data is available for transfer. buffer[1] is
+	   unfortunately ambiguous, and the only reliable way seems
+	   to be to simply skip over the block descriptor: the mode
+	   sense header is 8 bytes, with the block descriptor length
+	   in bytes 6-7, so the page data starts at 8 + that length. */
+ offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
+
+ if (offset + 16 > sizeof(buffer))
+ return -E2BIG;
+
+ if (offset + 16 > cgc->buflen) {
+ cgc->buflen = offset + 16;
+ ret = cdrom_mode_sense(cdi, cgc,
+ GPMODE_AUDIO_CTL_PAGE, 0);
+ if (ret)
+ return ret;
+ }
+
+ /* sanity check */
+ if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
+ buffer[offset + 1] < 14)
+ return -EINVAL;
+
+ /* now we have the current volume settings. if it was only
+ a CDROMVOLREAD, return these values */
+ if (cmd == CDROMVOLREAD) {
+ volctrl.channel0 = buffer[offset+9];
+ volctrl.channel1 = buffer[offset+11];
+ volctrl.channel2 = buffer[offset+13];
+ volctrl.channel3 = buffer[offset+15];
+ IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
+ return 0;
+ }
+
+ /* get the volume mask */
+ cgc->buffer = mask;
+ ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
+ if (ret)
+ return ret;
+
+ buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
+ buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
+ buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
+ buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
+
+ /* set volume */
+ cgc->buffer = buffer + offset - 8;
+ memset(cgc->buffer, 0, 8);
+ return cdrom_mode_select(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int cmd)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
+ cgc->cmd[0] = GPCMD_START_STOP_UNIT;
+ cgc->cmd[1] = 1;
+ cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
+ struct packet_command *cgc,
+ int cmd)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
+ cgc->cmd[0] = GPCMD_PAUSE_RESUME;
+ cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
+ cgc->data_direction = CGC_DATA_NONE;
+ return cdo->generic_packet(cdi, cgc);
+}
+
+static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
+ void __user *arg,
+ struct packet_command *cgc)
+{
+ int ret;
+ dvd_struct *s;
+ int size = sizeof(dvd_struct);
+
+ if (!CDROM_CAN(CDC_DVD))
+ return -ENOSYS;
+
+ s = kmalloc(size, GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
+ if (copy_from_user(s, arg, size)) {
+ kfree(s);
+ return -EFAULT;
+ }
+
+ ret = dvd_read_struct(cdi, s, cgc);
+ if (ret)
+ goto out;
+
+ if (copy_to_user(arg, s, size))
+ ret = -EFAULT;
+out:
+ kfree(s);
+ return ret;
+}
+
+static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ dvd_authinfo ai;
+ if (!CDROM_CAN(CDC_DVD))
+ return -ENOSYS;
+ cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
+ IOCTL_IN(arg, dvd_authinfo, ai);
+ ret = dvd_do_auth(cdi, &ai);
+ if (ret)
+ return ret;
+ IOCTL_OUT(arg, dvd_authinfo, ai);
+ return 0;
+}
+
+static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ long next = 0;
+ cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
+ ret = cdrom_get_next_writable(cdi, &next);
+ if (ret)
+ return ret;
+ IOCTL_OUT(arg, long, next);
+ return 0;
+}
+
+static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
+ void __user *arg)
+{
+ int ret;
+ long last = 0;
+ cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
+ ret = cdrom_get_last_written(cdi, &last);
+ if (ret)
+ return ret;
+ IOCTL_OUT(arg, long, last);
+ return 0;
+}
+
+static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
+ unsigned long arg)
+{
+ struct packet_command cgc;
+ void __user *userptr = (void __user *)arg;
+
+ memset(&cgc, 0, sizeof(cgc));
+
+ /* build a unified command and queue it through
+ cdo->generic_packet() */
+ switch (cmd) {
+ case CDROMREADRAW:
+ case CDROMREADMODE1:
+ case CDROMREADMODE2:
+ return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
+ case CDROMREADAUDIO:
+ return mmc_ioctl_cdrom_read_audio(cdi, userptr);
+ case CDROMSUBCHNL:
+ return mmc_ioctl_cdrom_subchannel(cdi, userptr);
+ case CDROMPLAYMSF:
+ return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
+ case CDROMPLAYBLK:
+ return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
+ case CDROMVOLCTRL:
+ case CDROMVOLREAD:
+ return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
+ case CDROMSTART:
+ case CDROMSTOP:
+ return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
+ case CDROMPAUSE:
+ case CDROMRESUME:
+ return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
+ case DVD_READ_STRUCT:
+ return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
+ case DVD_AUTH:
+ return mmc_ioctl_dvd_auth(cdi, userptr);
+ case CDROM_NEXT_WRITABLE:
+ return mmc_ioctl_cdrom_next_writable(cdi, userptr);
+ case CDROM_LAST_WRITTEN:
+ return mmc_ioctl_cdrom_last_written(cdi, userptr);
+ }
+
+ return -ENOTTY;
+}
+
+static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type,
+ track_information *ti)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct packet_command cgc;
+ int ret, buflen;
+
+ init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+ cgc.cmd[1] = type & 3;
+ cgc.cmd[4] = (track & 0xff00) >> 8;
+ cgc.cmd[5] = track & 0xff;
+ cgc.cmd[8] = 8;
+ cgc.quiet = 1;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ buflen = be16_to_cpu(ti->track_information_length) +
+ sizeof(ti->track_information_length);
+
+ if (buflen > sizeof(track_information))
+ buflen = sizeof(track_information);
+
+ cgc.cmd[8] = cgc.buflen = buflen;
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ /* return actual fill size */
+ return buflen;
+}
+
+/* requires CD R/RW */
+static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di)
+{
+ struct cdrom_device_ops *cdo = cdi->ops;
+ struct packet_command cgc;
+ int ret, buflen;
+
+ /* set up command and get the disc info */
+ init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+ cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+ cgc.cmd[8] = cgc.buflen = 2;
+ cgc.quiet = 1;
+
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ /* not all drives have the same disc_info length, so requeue
+ * packet with the length the drive tells us it can supply
+ */
+ buflen = be16_to_cpu(di->disc_information_length) +
+ sizeof(di->disc_information_length);
+
+ if (buflen > sizeof(disc_information))
+ buflen = sizeof(disc_information);
+
+ cgc.cmd[8] = cgc.buflen = buflen;
+ if ((ret = cdo->generic_packet(cdi, &cgc)))
+ return ret;
+
+ /* return actual fill size */
+ return buflen;
+}
+
+/* return the last written block on the CD-R media. this is for the udf
+ file system. */
+int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
+{
+ struct cdrom_tocentry toc;
+ disc_information di;
+ track_information ti;
+ __u32 last_track;
+ int ret = -1, ti_size;
+
+ if (!CDROM_CAN(CDC_GENERIC_PACKET))
+ goto use_toc;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < (int)(offsetof(typeof(di), last_track_lsb)
+ + sizeof(di.last_track_lsb)))
+ goto use_toc;
+
+ /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < (int)offsetof(typeof(ti), track_start))
+ goto use_toc;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+		if (last_track == 1)
+ goto use_toc;
+ last_track--;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ }
+
+ if (ti_size < (int)(offsetof(typeof(ti), track_size)
+ + sizeof(ti.track_size)))
+ goto use_toc;
+
+ /* if last recorded field is valid, return it. */
+ if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address)
+ + sizeof(ti.last_rec_address))) {
+ *last_written = be32_to_cpu(ti.last_rec_address);
+ } else {
+ /* make it up instead */
+ *last_written = be32_to_cpu(ti.track_start) +
+ be32_to_cpu(ti.track_size);
+ if (ti.free_blocks)
+ *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+ }
+ return 0;
+
+	/* this is where we end up if the drive can't do
+	   GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO, or if
+	   those fail or don't return enough information. fall back
+	   to the toc contents. */
+use_toc:
+ toc.cdte_format = CDROM_MSF;
+ toc.cdte_track = CDROM_LEADOUT;
+ if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))
+ return ret;
+ sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA);
+ *last_written = toc.cdte_addr.lba;
+ return 0;
+}
+
+/* return the next writable block. also for udf file system. */
+static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable)
+{
+ disc_information di;
+ track_information ti;
+ __u16 last_track;
+ int ret, ti_size;
+
+ if (!CDROM_CAN(CDC_GENERIC_PACKET))
+ goto use_last_written;
+
+ ret = cdrom_get_disc_info(cdi, &di);
+ if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb)
+ + sizeof(di.last_track_lsb))
+ goto use_last_written;
+
+ /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */
+ last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start))
+ goto use_last_written;
+
+ /* if this track is blank, try the previous. */
+ if (ti.blank) {
+ if (last_track == 1)
+ goto use_last_written;
+ last_track--;
+ ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti);
+ if (ti_size < 0)
+ goto use_last_written;
+ }
+
+ /* if next recordable address field is valid, use it. */
+ if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable)
+ + sizeof(ti.next_writable)) {
+ *next_writable = be32_to_cpu(ti.next_writable);
+ return 0;
+ }
+
+use_last_written:
+ if ((ret = cdrom_get_last_written(cdi, next_writable))) {
+ *next_writable = 0;
+ return ret;
+ } else {
+ *next_writable += 7;
+ return 0;
+ }
+}
+
+EXPORT_SYMBOL(cdrom_get_last_written);
+EXPORT_SYMBOL(register_cdrom);
+EXPORT_SYMBOL(unregister_cdrom);
+EXPORT_SYMBOL(cdrom_open);
+EXPORT_SYMBOL(cdrom_release);
+EXPORT_SYMBOL(cdrom_ioctl);
+EXPORT_SYMBOL(cdrom_media_changed);
+EXPORT_SYMBOL(cdrom_number_of_slots);
+EXPORT_SYMBOL(cdrom_mode_select);
+EXPORT_SYMBOL(cdrom_mode_sense);
+EXPORT_SYMBOL(init_cdrom_command);
+EXPORT_SYMBOL(cdrom_get_media_event);
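+
+/*
+ * A rough sketch of how a low-level driver would hook into the exports
+ * above; the mycd_* names are placeholders, and only cdrom_device_ops
+ * fields already used in this file are filled in.
+ */
+#if 0
+static int mycd_packet(struct cdrom_device_info *cdi,
+		       struct packet_command *cgc)
+{
+	/* hand cgc->cmd and cgc->buffer to the hardware, fill cgc->sense */
+	return 0;
+}
+
+static int mycd_drive_status(struct cdrom_device_info *cdi, int slot)
+{
+	return CDS_DISC_OK;
+}
+
+static struct cdrom_device_ops mycd_dops = {
+	.generic_packet	= mycd_packet,
+	.drive_status	= mycd_drive_status,
+	.capability	= CDC_DRIVE_STATUS | CDC_GENERIC_PACKET,
+};
+
+static struct cdrom_device_info mycd_info = {
+	.ops	= &mycd_dops,
+	.name	= "mycd",
+	.mask	= 0,	/* nothing masked off from ops->capability */
+};
+
+/*
+ * register_cdrom(&mycd_info) would then be called at probe time and
+ * unregister_cdrom(&mycd_info) on removal; cdrom_open(), cdrom_release()
+ * and cdrom_ioctl() are invoked from the driver's block device methods.
+ */
+#endif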
+
+#ifdef CONFIG_SYSCTL
+
+#define CDROM_STR_SIZE 1000
+
+static struct cdrom_sysctl_settings {
+ char info[CDROM_STR_SIZE]; /* general info */
+ int autoclose; /* close tray upon mount, etc */
+ int autoeject; /* eject on umount */
+ int debug; /* turn on debugging messages */
+ int lock; /* lock the door on device open */
+ int check; /* check media type */
+} cdrom_sysctl_settings;
+
+enum cdrom_print_option {
+ CTL_NAME,
+ CTL_SPEED,
+ CTL_SLOTS,
+ CTL_CAPABILITY
+};
+
+static int cdrom_print_info(const char *header, int val, char *info,
+ int *pos, enum cdrom_print_option option)
+{
+ const int max_size = sizeof(cdrom_sysctl_settings.info);
+ struct cdrom_device_info *cdi;
+ int ret;
+
+	ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
+ if (!ret)
+ return 1;
+
+ *pos += ret;
+
+ list_for_each_entry(cdi, &cdrom_list, list) {
+ switch (option) {
+ case CTL_NAME:
+ ret = scnprintf(info + *pos, max_size - *pos,
+ "\t%s", cdi->name);
+ break;
+ case CTL_SPEED:
+ ret = scnprintf(info + *pos, max_size - *pos,
+ "\t%d", cdi->speed);
+ break;
+ case CTL_SLOTS:
+ ret = scnprintf(info + *pos, max_size - *pos,
+ "\t%d", cdi->capacity);
+ break;
+ case CTL_CAPABILITY:
+ ret = scnprintf(info + *pos, max_size - *pos,
+ "\t%d", CDROM_CAN(val) != 0);
+ break;
+ default:
+			printk(KERN_INFO "cdrom: invalid option %d\n", option);
+ return 1;
+ }
+ if (!ret)
+ return 1;
+ *pos += ret;
+ }
+
+ return 0;
+}
+
+static int cdrom_sysctl_info(ctl_table *ctl, int write, struct file * filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int pos;
+ char *info = cdrom_sysctl_settings.info;
+ const int max_size = sizeof(cdrom_sysctl_settings.info);
+
+ if (!*lenp || (*ppos && !write)) {
+ *lenp = 0;
+ return 0;
+ }
+
+ mutex_lock(&cdrom_mutex);
+
+ pos = sprintf(info, "CD-ROM information, " VERSION "\n");
+
+ if (cdrom_print_info("\ndrive name:\t", 0, info, &pos, CTL_NAME))
+ goto done;
+ if (cdrom_print_info("\ndrive speed:\t", 0, info, &pos, CTL_SPEED))
+ goto done;
+ if (cdrom_print_info("\ndrive # of slots:", 0, info, &pos, CTL_SLOTS))
+ goto done;
+ if (cdrom_print_info("\nCan close tray:\t",
+ CDC_CLOSE_TRAY, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan open tray:\t",
+ CDC_OPEN_TRAY, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan lock tray:\t",
+ CDC_LOCK, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan change speed:",
+ CDC_SELECT_SPEED, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan select disk:",
+ CDC_SELECT_DISC, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan read multisession:",
+ CDC_MULTI_SESSION, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan read MCN:\t",
+ CDC_MCN, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nReports media changed:",
+ CDC_MEDIA_CHANGED, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan play audio:\t",
+ CDC_PLAY_AUDIO, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write CD-R:\t",
+ CDC_CD_R, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write CD-RW:",
+ CDC_CD_RW, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan read DVD:\t",
+ CDC_DVD, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write DVD-R:",
+ CDC_DVD_R, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write DVD-RAM:",
+ CDC_DVD_RAM, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan read MRW:\t",
+ CDC_MRW, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write MRW:\t",
+ CDC_MRW_W, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (cdrom_print_info("\nCan write RAM:\t",
+ CDC_RAM, info, &pos, CTL_CAPABILITY))
+ goto done;
+ if (!scnprintf(info + pos, max_size - pos, "\n\n"))
+ goto done;
+doit:
+ mutex_unlock(&cdrom_mutex);
+ return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
+done:
+ printk(KERN_INFO "cdrom: info buffer too small\n");
+ goto doit;
+}
+
+/* Unfortunately, per device settings are not implemented through
+ procfs/sysctl yet. When they are, this will naturally disappear. For now
+ just update all drives. Later this will become the template on which
+ new registered drives will be based. */
+static void cdrom_update_settings(void)
+{
+ struct cdrom_device_info *cdi;
+
+ mutex_lock(&cdrom_mutex);
+ list_for_each_entry(cdi, &cdrom_list, list) {
+ if (autoclose && CDROM_CAN(CDC_CLOSE_TRAY))
+ cdi->options |= CDO_AUTO_CLOSE;
+ else if (!autoclose)
+ cdi->options &= ~CDO_AUTO_CLOSE;
+ if (autoeject && CDROM_CAN(CDC_OPEN_TRAY))
+ cdi->options |= CDO_AUTO_EJECT;
+ else if (!autoeject)
+ cdi->options &= ~CDO_AUTO_EJECT;
+ if (lockdoor && CDROM_CAN(CDC_LOCK))
+ cdi->options |= CDO_LOCK;
+ else if (!lockdoor)
+ cdi->options &= ~CDO_LOCK;
+ if (check_media_type)
+ cdi->options |= CDO_CHECK_TYPE;
+ else
+ cdi->options &= ~CDO_CHECK_TYPE;
+ }
+ mutex_unlock(&cdrom_mutex);
+}
+
+static int cdrom_sysctl_handler(ctl_table *ctl, int write, struct file * filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+
+ ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
+
+ if (write) {
+
+ /* we only care for 1 or 0. */
+ autoclose = !!cdrom_sysctl_settings.autoclose;
+ autoeject = !!cdrom_sysctl_settings.autoeject;
+ debug = !!cdrom_sysctl_settings.debug;
+ lockdoor = !!cdrom_sysctl_settings.lock;
+ check_media_type = !!cdrom_sysctl_settings.check;
+
+		/* update the option flags according to the changes. we
+		   don't have per-device options through sysctl yet,
+		   but once we do, this will disappear. */
+ cdrom_update_settings();
+ }
+
+ return ret;
+}
+
+/* Place files in /proc/sys/dev/cdrom */
+static ctl_table cdrom_table[] = {
+ {
+ .procname = "info",
+ .data = &cdrom_sysctl_settings.info,
+ .maxlen = CDROM_STR_SIZE,
+ .mode = 0444,
+ .proc_handler = &cdrom_sysctl_info,
+ },
+ {
+ .procname = "autoclose",
+ .data = &cdrom_sysctl_settings.autoclose,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &cdrom_sysctl_handler,
+ },
+ {
+ .procname = "autoeject",
+ .data = &cdrom_sysctl_settings.autoeject,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &cdrom_sysctl_handler,
+ },
+ {
+ .procname = "debug",
+ .data = &cdrom_sysctl_settings.debug,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &cdrom_sysctl_handler,
+ },
+ {
+ .procname = "lock",
+ .data = &cdrom_sysctl_settings.lock,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &cdrom_sysctl_handler,
+ },
+ {
+ .procname = "check_media",
+ .data = &cdrom_sysctl_settings.check,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &cdrom_sysctl_handler
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table cdrom_cdrom_table[] = {
+ {
+ .ctl_name = DEV_CDROM,
+ .procname = "cdrom",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = cdrom_table,
+ },
+ { .ctl_name = 0 }
+};
+
+/* Make sure that /proc/sys/dev is there */
+static ctl_table cdrom_root_table[] = {
+ {
+ .ctl_name = CTL_DEV,
+ .procname = "dev",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = cdrom_cdrom_table,
+ },
+ { .ctl_name = 0 }
+};
+static struct ctl_table_header *cdrom_sysctl_header;
+
+static void cdrom_sysctl_register(void)
+{
+ static int initialized;
+
+ if (initialized == 1)
+ return;
+
+ cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
+
+ /* set the defaults */
+ cdrom_sysctl_settings.autoclose = autoclose;
+ cdrom_sysctl_settings.autoeject = autoeject;
+ cdrom_sysctl_settings.debug = debug;
+ cdrom_sysctl_settings.lock = lockdoor;
+ cdrom_sysctl_settings.check = check_media_type;
+
+ initialized = 1;
+}
+
+static void cdrom_sysctl_unregister(void)
+{
+ if (cdrom_sysctl_header)
+ unregister_sysctl_table(cdrom_sysctl_header);
+}
+
+#else /* CONFIG_SYSCTL */
+
+static void cdrom_sysctl_register(void)
+{
+}
+
+static void cdrom_sysctl_unregister(void)
+{
+}
+
+#endif /* CONFIG_SYSCTL */
+
+static int __init cdrom_init(void)
+{
+ cdrom_sysctl_register();
+
+ return 0;
+}
+
+static void __exit cdrom_exit(void)
+{
+ printk(KERN_INFO "Uniform CD-ROM driver unloaded\n");
+ cdrom_sysctl_unregister();
+}
+
+module_init(cdrom_init);
+module_exit(cdrom_exit);
+MODULE_LICENSE("GPL");
diff --git a/windhoek/components.c b/windhoek/components.c
new file mode 100644
index 00000000..d7d45f15
--- /dev/null
+++ b/windhoek/components.c
@@ -0,0 +1,407 @@
+/**
+ * \file windhoek/server/src/components.c
+ * \brief Windhoek IDL component implementation
+ *
+ * \date 2008-02-15
+ * \author Bjoern Doebel <doebel@tudos.org>
+ *
+ * (c) 2008 Technische Universitaet Dresden
+ * This file is part of DROPS, which is distributed under the terms of the
+ * GNU General Public License 2. Please see the COPYING file for details.
+ */
+
+#include <l4/log/l4log.h>
+#include <l4/l4rm/l4rm.h>
+#include <l4/dm_mem/dm_mem.h>
+
+#include <l4/dde/linux26/dde26_mem.h> // page cache functions
+#include <l4/dde/ddekit/pgtab.h> // set_region_with_size
+
+#include <linux/list.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/hdreg.h>
+#include <linux/mm.h>
+
+#include "windhoek_local.h"
+#include <l4/windhoek/server-internal-server.h>
+#include <l4/windhoek/server-internal-client.h>
+
+#define DEBUG 0
+
+struct client_state clients[MAX_CLIENTS];
+
+
+int get_free_client_state(void)
+{
+ int i;
+ int ret = -1;
+
+ for (i=0; i < MAX_CLIENTS; ++i) {
+ if (!client_state_valid(i)) {
+ ret = i;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+
+void free_client_state(int idx)
+{
+ /*
+ * clean up page struct mappings
+ *
+	 * XXX: If this were time-critical, we could of course implement a
+	 * page_cache_lookup_and_del() helper (see the sketch after this
+	 * function), but freeing a client object won't happen that often.
+ */
+ int i;
+ for (i=0; i < clients[idx].data_size; i += PAGE_SIZE) {
+ struct page *p = dde_page_lookup((unsigned long)clients[idx].data_address + i);
+ if (p)
+ dde_page_cache_remove(p);
+ }
+
+ /*
+	 * detach the data and ctrl dataspaces
+ */
+ l4rm_detach(clients[idx].data_address);
+ l4rm_detach(clients[idx].ctrl_address);
+
+ /*
+ * XXX: does l4rm_detach() also remove the user ptr???
+ */
+
+ /*
+ * remove cached address and gendisk
+ */
+ clients[idx].data_ds = L4DM_INVALID_DATASPACE;
+ clients[idx].data_address = NULL;
+ clients[idx].data_size = 0;
+ clients[idx].ctrl_ds = L4DM_INVALID_DATASPACE;
+ clients[idx].ctrl_address = NULL;
+ clients[idx].ctrl_size = 0;
+ clients[idx].gendisk = NULL;
+ clients[idx].client_id = L4_INVALID_ID;
+}
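+
+/*
+ * A possible shape for the page_cache_lookup_and_del() helper mentioned
+ * in free_client_state() above, kept out of the build for now. It only
+ * combines the dde_page_lookup()/dde_page_cache_remove() calls already
+ * used there; whether the struct page should also be kfree()d is an open
+ * question (they are kmalloc()ed in init_clientstate() below).
+ */
+#if 0
+static void page_cache_lookup_and_del(unsigned long va)
+{
+	struct page *p = dde_page_lookup(va);
+
+	if (!p)
+		return;
+
+	dde_page_cache_remove(p);
+	kfree(p);	/* matches the kmalloc() in init_clientstate() */
+}
+#endif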
+
+
+static int init_clientstate(int idx, const l4dm_dataspace_t *ctrl, l4_size_t ctrl_size,
+ const l4dm_dataspace_t *data, l4_size_t data_size,
+ struct gendisk *disk, l4_threadid_t client)
+{
+ windhoek_device *dev = NULL;
+ struct hd_geometry geom;
+ struct block_device *bd = NULL;
+ int err, dummy;
+ ddekit_addr_t pa;
+
+ /*
+ * attach data and ctrl dataspaces
+ */
+ err = l4rm_attach(data, data_size, 0, L4DM_RW | L4RM_LOG2_ALIGNED | L4RM_MAP,
+ &clients[idx].data_address);
+ LOGd(DEBUG, "attach data to %p (size %d), err %d", clients[idx].data_address, data_size, err);
+ if (err) {
+ LOG_Error("cannot attach data area.");
+ free_client_state(idx);
+ return -ENOMEM;
+ }
+ memset(clients[idx].data_address, 0, data_size);
+
+ err = l4dm_mem_ds_phys_addr(data, 0, data_size, &pa, &dummy);
+ LOGd(DEBUG, "data phys addr: %p (err %d)", (void*)pa, err);
+ if (err) {
+ LOG_Error("cannot find phys addr for %p", clients[idx].data_address);
+ free_client_state(idx);
+ return -ENOMEM;
+ }
+
+ /*
+ * For the data area, we add a page struct for each page in the ds, so
+	 * that Linux code may later on use it as if it were normal Linux kernel
+ * memory.
+ */
+ for (dummy = 0; dummy < data_size; dummy += PAGE_SIZE) {
+ unsigned long va = (unsigned long)(clients[idx].data_address + dummy);
+ struct page *p = kmalloc(sizeof(struct page), GFP_KERNEL);
+ p->virtual = (void *)va;
+ //LOGd(DEBUG, "page %p -> %p", p, page_address(p));
+ dde_page_cache_add(p);
+ }
+
+ /*
+ * add virt->phys mapping for __pa()
+ */
+ ddekit_pgtab_set_region_with_size(clients[idx].data_address,
+ pa, data_size, PTE_TYPE_LARGE);
+
+
+ err = l4rm_attach(ctrl, ctrl_size, 0, L4DM_RW | L4RM_MAP,
+ &clients[idx].ctrl_address);
+ LOGd(DEBUG, "attach ctrl to %p, err %d", clients[idx].ctrl_address, err);
+ if (err) {
+ LOG_Error("cannot attach ctrl area.");
+ free_client_state(idx);
+ return -ENOMEM;
+ }
+
+ err = l4dm_mem_ds_phys_addr(ctrl, 0, ctrl_size, &pa, &dummy);
+ LOGd(DEBUG, "ctrl phys addr: %p (err %d)", (void*)pa, err);
+ if (err) {
+ LOG_Error("cannot find phys addr for %p", clients[idx].ctrl_address);
+ free_client_state(idx);
+ return -ENOMEM;
+ }
+
+ /*
+ * add virt->phys mapping for __pa()
+ */
+ ddekit_pgtab_set_region_with_size(clients[idx].ctrl_address, (ddekit_addr_t)pa, ctrl_size, PTE_TYPE_LARGE);
+
+ clients[idx].data_ds = *data;
+ clients[idx].data_size = data_size;
+ clients[idx].ctrl_ds = *ctrl;
+ clients[idx].ctrl_size = ctrl_size;
+ clients[idx].gendisk = disk;
+ clients[idx].client_id = client;
+
+ INIT_LIST_HEAD(&clients[idx].finished_bios);
+ clients[idx].notify_waiter = L4_INVALID_ID;
+ spin_lock_init(&clients[idx].lock);
+
+ /*
+	 * Fill in the windhoek device at the beginning of the CTRL ds.
+ */
+ bd = bdget_disk(clients[idx].gendisk, 0);
+ err = clients[idx].gendisk->fops->getgeo(bd, &geom);
+
+ dev = (windhoek_device *)clients[idx].ctrl_address;
+ dev->sectors = geom.sectors;
+ dev->start_sector = 0;
+ dev->block_size = 512;
+ dev->heads = geom.heads;
+ dev->cylinders = geom.cylinders;
+ dev->flags = 0;
+ dev->capacity = get_capacity(clients[idx].gendisk);
+ strncpy(dev->name, disk->disk_name, MAX_NAME_LEN);
+ dev->handle = idx;
+
+ return 0;
+}
+
+
+int
+windhoek_client_open_component (CORBA_Object _dice_corba_obj,
+ const char* name /* in */,
+ const l4dm_dataspace_t *ctrl_ds /* in */,
+ l4_size_t ctrl_size /* in */,
+ const l4dm_dataspace_t *data_ds /* in */,
+ l4_size_t data_size /* in */,
+ CORBA_Server_Environment *_dice_corba_env)
+{
+ struct gendisk *disk;
+ int idx, err;
+ l4_size_t size;
+
+ LOGd(DEBUG, "name '%s'", name);
+ LOGd(DEBUG, "data ds: id 0x%03x, tid "l4util_idfmt, data_ds->id, l4util_idstr(data_ds->manager));
+ LOGd(DEBUG, "ctrl ds: id 0x%03x, tid "l4util_idfmt, ctrl_ds->id, l4util_idstr(ctrl_ds->manager));
+
+ /*
+ * find gendisk object
+ */
+ disk = find_disk_by_name(name);
+ LOGd(DEBUG, "disk @ %p", disk);
+
+ if (!disk) {
+ LOG_Error("No such disk '%s'", name);
+ return -EACCES;
+ }
+
+ idx = get_free_client_state();
+ if (idx < 0) {
+ LOG_Error("Out of client states...");
+ return -ENOMEM;
+ }
+
+ if ((err = init_clientstate(idx, ctrl_ds, ctrl_size, data_ds, data_size,
+ disk, *_dice_corba_obj)))
+ return err;
+
+ return idx;
+}
+
+
+int
+windhoek_client_close_component (CORBA_Object _dice_corba_obj,
+ int handle /* in */,
+ CORBA_Server_Environment *_dice_corba_env)
+{
+ SANITY_CHECK(error, handle, *_dice_corba_obj);
+
+ free_client_state(handle);
+
+ return 0;
+}
+
+
+static void bio_end_component(struct bio *bio, int err)
+{
+ windhoek_bio *wb = bio->bi_private;
+ int error;
+ DICE_DECLARE_ENV(env);
+
+ if (DEBUG)
+ printk("bio @ %p (handle %d) done.\n", bio, wb->handle);
+
+ // send notification
+ error = windhoek_server_notify_call(&main_thread, wb, &env);
+
+	/* XXX: bio cleanup? */
+}
+
+
+
+int windhoek_client_start_request_component(CORBA_Object _dice_corba_obj,
+ int handle,
+ unsigned long offset,
+ CORBA_Server_Environment *_dice_corba_env)
+{
+ struct client_state *c = &clients[handle];
+ struct block_device *bd = NULL;
+ unsigned long addr = 0;
+ struct bio *bio = NULL;
+ windhoek_bio *wbio = NULL;
+ int i = -1;
+
+ LOGd(DEBUG, "handle %d, offset %lx", handle, offset);
+ SANITY_CHECK(error, handle, *_dice_corba_obj);
+
+ bio = bio_alloc(GFP_NOIO, 1);
+ LOGd(DEBUG, "alloced bio: %p", bio);
+
+ bd = bdget_disk(clients[handle].gendisk, 0);
+
+ addr = (unsigned long)clients[handle].ctrl_address;
+ addr += offset;
+
+ wbio = (windhoek_bio *)addr;
+ LOGd(DEBUG, "wbio @ %p", wbio);
+
+ bio->bi_sector = wbio->start_sector;
+ bio->bi_bdev = bd;
+ bio->bi_vcnt = wbio->sg_count;
+ bio->bi_size = 0;
+ LOGd(DEBUG, "sector: %lx, dev %p, sg count %d", bio->bi_sector, bio->bi_bdev, bio->bi_vcnt);
+
+ for (i = 0; i < wbio->sg_count; ++i) {
+ unsigned long va = 0;
+ ds_list_element *dsle = &wbio->sg_data[i];
+ struct page *page = NULL;
+
+#if DEBUG
+ dump_ds_element(dsle);
+#endif
+
+ va = (unsigned long)clients[handle].data_address;
+ va += dsle->offset;
+ va &= PAGE_MASK;
+ LOGd(DEBUG, "va @ %lx", va);
+
+ page = dde_page_lookup(va);
+ LOGd(DEBUG, "page @ %p", page);
+ BUG_ON(!page);
+
+ bio->bi_io_vec[i].bv_page = page;
+ LOGd(DEBUG, "bi_io_vec[%d].bv_page = %p", i, page);
+ bio->bi_io_vec[i].bv_len = dsle->size;
+ bio->bi_io_vec[i].bv_offset = (unsigned long)clients[handle].data_address + dsle->offset - va;
+
+ bio->bi_size += dsle->size;
+ }
+
+ bio->bi_end_io = bio_end_component;
+ bio->bi_private = wbio;
+
+ bio_get(bio);
+ if (wbio->type == BIO_READ)
+ submit_bio(READ, bio);
+ else
+ submit_bio(WRITE, bio);
+ bio_put(bio);
+
+ return 0;
+}
+
+
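+/*
+ * Return the uid of the next finished bio for this client. If none is
+ * pending, the reply is deferred (DICE_NO_REPLY) until the completion
+ * path answers it through windhoek_server_notify_component().
+ */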
+int
+windhoek_client_next_notification_component (CORBA_Object _dice_corba_obj,
+ int handle /* in */,
+ short *_dice_reply,
+ CORBA_Server_Environment *_dice_corba_env)
+{
+ int ret = 0;
+
+ SANITY_CHECK(error, handle, *_dice_corba_obj);
+
+ struct client_state *client = &clients[handle];
+
+ spin_lock(&client->lock);
+
+ /* No finished BIOs -> wait */
+ if (list_empty(&client->finished_bios)) {
+ client->notify_waiter = *_dice_corba_obj;
+ *_dice_reply = DICE_NO_REPLY;
+ }
+ /* have pending BIOs -> reply immediately */
+ else {
+		/* take the oldest finished bio off the list */
+		struct list_head *h = client->finished_bios.next;
+ struct windhoek_bio_entry *be = list_entry(h, struct windhoek_bio_entry, next);
+ list_del(h);
+ ret = be->wbio->uid;
+ kfree(be);
+ }
+
+ spin_unlock(&client->lock);
+
+ return ret;
+}
+
+
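+/*
+ * Server-internal notification, invoked (via IPC to the main thread) from
+ * bio_end_component(): either queue the finished bio or reply immediately
+ * to a client that is already blocked in next_notification.
+ */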
+int
+windhoek_server_notify_component (CORBA_Object _dice_corba_obj,
+ const windhoek_bio *wbio /* in */,
+ CORBA_Server_Environment *_dice_corba_env)
+{
+ struct client_state *client = &clients[wbio->handle];
+
+ BUG_ON(!handle_valid(wbio->handle));
+ BUG_ON(!l4_task_equal(*_dice_corba_obj, l4_myself()));
+
+ spin_lock(&client->lock);
+
+ if (l4_is_invalid_id(client->notify_waiter)) {
+		struct windhoek_bio_entry *be = kmalloc(sizeof(*be), GFP_KERNEL);
+		BUG_ON(!be);
+		be->wbio = wbio;
+ list_add_tail(&be->next, &client->finished_bios);
+ }
+ else {
+ DICE_DECLARE_SERVER_ENV(env);
+ windhoek_client_next_notification_reply(&client->notify_waiter, wbio->uid, &env);
+ client->notify_waiter = L4_INVALID_ID;
+ }
+
+ spin_unlock(&client->lock);
+
+ return 0;
+}
+
+
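+/* Reset the client state table. */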
+void client_state_init(void)
+{
+ memset(clients, 0, sizeof(clients));
+}
diff --git a/windhoek/default.ld b/windhoek/default.ld
new file mode 100644
index 00000000..f8e4e28d
--- /dev/null
+++ b/windhoek/default.ld
@@ -0,0 +1,213 @@
+/* Script for -z combreloc: combine and sort reloc sections */
+OUTPUT_FORMAT("elf32-i386", "elf32-i386",
+ "elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(_start)
+SEARCH_DIR("/usr/i486-gnu/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ PROVIDE (__executable_start = 0x08048000); . = 0x08048000 + SIZEOF_HEADERS;
+ .interp : { *(.interp) }
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.dyn :
+ {
+ *(.rel.init)
+ *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*)
+ *(.rel.fini)
+ *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*)
+ *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*)
+ *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*)
+ *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*)
+ *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*)
+ *(.rel.ctors)
+ *(.rel.dtors)
+ *(.rel.got)
+ *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*)
+ }
+ .rela.dyn :
+ {
+ *(.rela.init)
+ *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*)
+ *(.rela.fini)
+ *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*)
+ *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*)
+ *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*)
+ *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*)
+ *(.rela.ctors)
+ *(.rela.dtors)
+ *(.rela.got)
+ *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*)
+ }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init :
+ {
+ KEEP (*(.init))
+ } =0x90909090
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text .stub .text.* .gnu.linkonce.t.*)
+ KEEP (*(.text.*personality*))
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ } =0x90909090
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0x90909090
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE));
+ /* Exception handling */
+ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
+ .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
+ /* Thread Local Storage sections */
+ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
+ .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN (__preinit_array_start = .);
+ KEEP (*(.preinit_array))
+ PROVIDE_HIDDEN (__preinit_array_end = .);
+ }
+ .init_array :
+ {
+ PROVIDE_HIDDEN (__init_array_start = .);
+ KEEP (*(SORT(.init_array.*)))
+ KEEP (*(.init_array))
+ PROVIDE_HIDDEN (__init_array_end = .);
+ }
+ .fini_array :
+ {
+ PROVIDE_HIDDEN (__fini_array_start = .);
+ KEEP (*(.fini_array))
+ KEEP (*(SORT(.fini_array.*)))
+ PROVIDE_HIDDEN (__fini_array_end = .);
+ }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ KEEP (*crtbegin?.o(.ctors))
+ /* We don't want to include the .ctor section from
+ the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors))
+
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+
+ KEEP (*(.mark_beg_l4dde_ctors))
+ KEEP (*(SORT(.l4dde_ctors.?)))
+ KEEP (*(SORT(.l4dde_ctors.??)))
+ KEEP (*(SORT(.l4dde_ctors.???)))
+ KEEP (*(SORT(.l4dde_ctors.????)))
+ KEEP (*(SORT(.l4dde_ctors.?????)))
+ KEEP (*(.l4dde_ctors))
+ KEEP (*(.mark_end_l4dde_ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*crtbegin?.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
+ .dynamic : { *(.dynamic) }
+ .got : { *(.got) }
+ . = DATA_SEGMENT_RELRO_END (12, .);
+ .got.plt : { *(.got.plt) }
+ .data :
+ {
+ *(.data .data.* .gnu.linkonce.d.*)
+ KEEP (*(.gnu.linkonce.d.*personality*))
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ _edata = .; PROVIDE (edata = .);
+ __bss_start = .;
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections.
+ FIXME: Why do we need it? When there is no .bss section, we don't
+ pad the .data section. */
+ . = ALIGN(. != 0 ? 32 / 8 : 1);
+ }
+ . = ALIGN(32 / 8);
+ . = ALIGN(32 / 8);
+ _end = .; PROVIDE (end = .);
+ . = DATA_SEGMENT_END (.);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* DWARF 3 */
+ .debug_pubtypes 0 : { *(.debug_pubtypes) }
+ .debug_ranges 0 : { *(.debug_ranges) }
+ .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) }
+ /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) }
+}
+
diff --git a/windhoek/genhd.c b/windhoek/genhd.c
new file mode 100644
index 00000000..5b6140f6
--- /dev/null
+++ b/windhoek/genhd.c
@@ -0,0 +1,132 @@
+/**
+ * \file windhoek/server/src/genhd.c
+ * \brief Windhoek genhd replacement
+ *
+ * \date 2008-01-29
+ * \author Bjoern Doebel <doebel@tudos.org>
+ *
+ * (c) 2008 Technische Universitaet Dresden
+ * This file is part of DROPS, which is distributed under the terms of the
+ * GNU General Public License 2. Please see the COPYING file for details.
+ */
+
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+
+#include "windhoek_local.h"
+
+/*
+ * Array for storing all known block devices
+ */
+struct gendisk *known_disks[MAX_GENDISKS];
+
+static void print_gendisk(struct gendisk *gendisk)
+{
+ struct block_device *bdev = bdget_disk(gendisk, 0);
+ struct hd_geometry geom;
+
+ DEBUG_MSG("disk: %p (maj %x, min %x, max_min %x)", gendisk,
+ gendisk->major, gendisk->first_minor, gendisk->minors);
+ DEBUG_MSG("\tdisk name '%s'", gendisk->disk_name);
+// DEBUG_MSG("\tpartitions @ %p, fops @ %p", gendisk->part, gendisk->fops);
+ DEBUG_MSG("\trq @ %p, privdata @ %p", gendisk->queue, gendisk->private_data);
+	DEBUG_MSG("\tcapacity %llu, flags %x, dev %p",
+	          (unsigned long long)get_capacity(gendisk), gendisk->flags,
+	          gendisk->driverfs_dev);
+}
+
+
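+/*
+ * Windhoek replacement for the kernel's add_disk(): record the gendisk in
+ * known_disks[], allocate a devt, register the request queue and open the
+ * block device once via blkdev_get().
+ */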
+void add_disk(struct gendisk *disk)
+{
+ int i, err;
+ dev_t devt;
+ struct block_device *bdev;
+
+ /* minors == 0 indicates to use ext devt from part0 and should
+ * be accompanied with EXT_DEVT flag. Make sure all
+ * parameters make sense.
+ */
+ WARN_ON(disk->minors && !(disk->major || disk->first_minor));
+ WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
+
+	for (i = 0; i < MAX_GENDISKS; ++i) {
+		if (known_disks[i] == NULL)
+			break;
+	}
+
+	if (i == MAX_GENDISKS) {
+		/* no free slot left in known_disks[] */
+		WARN_ON(1);
+		return;
+	}
+
+	print_gendisk(disk);
+	known_disks[i] = disk;
+
+ disk->flags |= GENHD_FL_UP;
+
+ err = blk_alloc_devt(&disk->part0, &devt);
+ if (err) {
+ WARN_ON(1);
+ return;
+ }
+ disk_to_dev(disk)->devt = devt;
+
+ /* ->major and ->first_minor aren't supposed to be
+ * dereferenced from here on, but set them just in case.
+ */
+ disk->major = MAJOR(devt);
+ disk->first_minor = MINOR(devt);
+
+// blk_register_region(disk_devt(disk), disk->minors, NULL,
+// exact_match, exact_lock, disk);
+ blk_register_queue(disk);
+
+ bdev = bdget_disk(disk, 0);
+ BUG_ON(!bdev);
+
+ bdev->bd_invalidated = 0;
+ err = blkdev_get(bdev, FMODE_READ);
+ BUG_ON(err);
+ //blkdev_put(bdev);
+}
+
+
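+/*
+ * Look up a registered gendisk by dev_t. Partitions are not handled here,
+ * so *part is always 0.
+ */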
+struct gendisk *get_gendisk(dev_t dev, int *part)
+{
+ int i;
+
+ // XXX: no partitions
+ *part = 0;
+
+ for (i = 0; i < MAX_GENDISKS; ++i) {
+ if (!known_disks[i])
+ continue;
+
+ if (MKDEV(known_disks[i]->major, known_disks[i]->first_minor) == dev)
+ return known_disks[i];
+ }
+
+ return NULL;
+}
+
+
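+/*
+ * Find a registered gendisk by its disk_name; this is how the client
+ * open call maps a device name to a disk.
+ */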
+struct gendisk* find_disk_by_name(const char* name)
+{
+ struct gendisk *ret = NULL;
+ int i;
+
+ for (i = 0; i < MAX_GENDISKS; ++i) {
+ if (!known_disks[i])
+ continue;
+
+ if (!strcmp(known_disks[i]->disk_name, name)) {
+ ret = known_disks[i];
+ break;
+ }
+ }
+
+ return ret;
+}
+
+
+void gendisk_init(void)
+{
+ int i;
+ for (i = 0; i < MAX_GENDISKS; ++i)
+ known_disks[i] = NULL;
+}
diff --git a/windhoek/ide/ide-cd.h b/windhoek/ide/ide-cd.h
new file mode 100644
index 00000000..c878bfcf
--- /dev/null
+++ b/windhoek/ide/ide-cd.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 1996-98 Erik Andersen
+ * Copyright (C) 1998-2000 Jens Axboe
+ */
+#ifndef _IDE_CD_H
+#define _IDE_CD_H
+
+#include <linux/cdrom.h>
+#include <asm/byteorder.h>
+
+#define IDECD_DEBUG_LOG 0
+
+#if IDECD_DEBUG_LOG
+#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
+#else
+#define ide_debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
+#define ATAPI_WAIT_WRITE_BUSY (10 * HZ)
+
+/************************************************************************/
+
+#define SECTOR_BITS 9
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_BITS)
+#endif
+#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS)
+#define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
+
+/* Capabilities Page size including 8 bytes of Mode Page Header */
+#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
+#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
+
+/* Structure of a MSF cdrom address. */
+struct atapi_msf {
+ u8 reserved;
+ u8 minute;
+ u8 second;
+ u8 frame;
+};
+
+/* Space to hold the disk TOC. */
+#define MAX_TRACKS 99
+struct atapi_toc_header {
+ unsigned short toc_length;
+ u8 first_track;
+ u8 last_track;
+};
+
+struct atapi_toc_entry {
+ u8 reserved1;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 adr : 4;
+ u8 control : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 control : 4;
+ u8 adr : 4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 track;
+ u8 reserved2;
+ union {
+ unsigned lba;
+ struct atapi_msf msf;
+ } addr;
+};
+
+struct atapi_toc {
+ int last_session_lba;
+ int xa_flag;
+ unsigned long capacity;
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent[MAX_TRACKS+1];
+ /* One extra for the leadout. */
+};
+
+/* Extra per-device info for cdrom drives. */
+struct cdrom_info {
+ ide_drive_t *drive;
+ struct ide_driver *driver;
+ struct gendisk *disk;
+ struct device dev;
+
+ /* Buffer for table of contents. NULL if we haven't allocated
+ a TOC buffer for this device yet. */
+
+ struct atapi_toc *toc;
+
+ /* The result of the last successful request sense command
+ on this device. */
+ struct request_sense sense_data;
+
+ struct request request_sense_request;
+
+ u8 max_speed; /* Max speed of the drive. */
+ u8 current_speed; /* Current speed of the drive. */
+
+ /* Per-device info needed by cdrom.c generic driver. */
+ struct cdrom_device_info devinfo;
+
+ unsigned long write_timeout;
+};
+
+/* ide-cd_verbose.c */
+void ide_cd_log_error(const char *, struct request *, struct request_sense *);
+
+/* ide-cd.c functions used by ide-cd_ioctl.c */
+int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
+ unsigned *, struct request_sense *, int, unsigned int);
+int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
+int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
+void ide_cdrom_update_speed(ide_drive_t *, u8 *);
+int cdrom_check_status(ide_drive_t *, struct request_sense *);
+
+/* ide-cd_ioctl.c */
+int ide_cdrom_open_real(struct cdrom_device_info *, int);
+void ide_cdrom_release_real(struct cdrom_device_info *);
+int ide_cdrom_drive_status(struct cdrom_device_info *, int);
+int ide_cdrom_check_media_change_real(struct cdrom_device_info *, int);
+int ide_cdrom_tray_move(struct cdrom_device_info *, int);
+int ide_cdrom_lock_door(struct cdrom_device_info *, int);
+int ide_cdrom_select_speed(struct cdrom_device_info *, int);
+int ide_cdrom_get_last_session(struct cdrom_device_info *,
+ struct cdrom_multisession *);
+int ide_cdrom_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
+int ide_cdrom_reset(struct cdrom_device_info *cdi);
+int ide_cdrom_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
+int ide_cdrom_packet(struct cdrom_device_info *, struct packet_command *);
+
+#endif /* _IDE_CD_H */
diff --git a/windhoek/ide/ide-disk.c b/windhoek/ide/ide-disk.c
new file mode 100644
index 00000000..24654e93
--- /dev/null
+++ b/windhoek/ide/ide-disk.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 1998-2002 Linux ATA Development
+ * Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/leds.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+
+#include "ide-disk.h"
+
+static const u8 ide_rw_cmds[] = {
+ ATA_CMD_READ_MULTI,
+ ATA_CMD_WRITE_MULTI,
+ ATA_CMD_READ_MULTI_EXT,
+ ATA_CMD_WRITE_MULTI_EXT,
+ ATA_CMD_PIO_READ,
+ ATA_CMD_PIO_WRITE,
+ ATA_CMD_PIO_READ_EXT,
+ ATA_CMD_PIO_WRITE_EXT,
+ ATA_CMD_READ,
+ ATA_CMD_WRITE,
+ ATA_CMD_READ_EXT,
+ ATA_CMD_WRITE_EXT,
+};
+
+static const u8 ide_data_phases[] = {
+ TASKFILE_MULTI_IN,
+ TASKFILE_MULTI_OUT,
+ TASKFILE_IN,
+ TASKFILE_OUT,
+ TASKFILE_IN_DMA,
+ TASKFILE_OUT_DMA,
+};
+
+static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
+{
+ u8 index, lba48, write;
+
+ lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
+ write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
+
+ if (dma)
+ index = 8;
+ else
+ index = drive->mult_count ? 0 : 4;
+
+ task->tf.command = ide_rw_cmds[index + lba48 + write];
+
+ if (dma)
+ index = 8; /* fixup index */
+
+ task->data_phase = ide_data_phases[index / 2 + write];
+}
+
+/*
+ * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
+ * using LBA if supported, or CHS otherwise, to address sectors.
+ */
+static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 nsectors = (u16)rq->nr_sectors;
+ u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
+ u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+ ide_startstop_t rc;
+
+ if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
+ if (block + rq->nr_sectors > 1ULL << 28)
+ dma = 0;
+ else
+ lba48 = 0;
+ }
+
+ if (!dma) {
+ ide_init_sg_cmd(drive, rq);
+ ide_map_sg(drive, rq);
+ }
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ if (drive->dev_flags & IDE_DFLAG_LBA) {
+ if (lba48) {
+ pr_debug("%s: LBA=0x%012llx\n", drive->name,
+ (unsigned long long)block);
+
+ tf->hob_nsect = (nsectors >> 8) & 0xff;
+ tf->hob_lbal = (u8)(block >> 24);
+ if (sizeof(block) != 4) {
+ tf->hob_lbam = (u8)((u64)block >> 32);
+ tf->hob_lbah = (u8)((u64)block >> 40);
+ }
+
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = (u8) block;
+ tf->lbam = (u8)(block >> 8);
+ tf->lbah = (u8)(block >> 16);
+
+ task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ } else {
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = block;
+ tf->lbam = block >>= 8;
+ tf->lbah = block >>= 8;
+ tf->device = (block >> 8) & 0xf;
+ }
+
+ tf->device |= ATA_LBA;
+ } else {
+ unsigned int sect, head, cyl, track;
+
+ track = (int)block / drive->sect;
+ sect = (int)block % drive->sect + 1;
+ head = track % drive->head;
+ cyl = track / drive->head;
+
+ pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
+
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = sect;
+ tf->lbam = cyl;
+ tf->lbah = cyl >> 8;
+ tf->device = head;
+ }
+
+ if (rq_data_dir(rq))
+ task.tf_flags |= IDE_TFLAG_WRITE;
+
+ ide_tf_set_cmd(drive, &task, dma);
+ if (!dma)
+ hwif->data_phase = task.data_phase;
+ task.rq = rq;
+
+ rc = do_rw_taskfile(drive, &task);
+
+ if (rc == ide_stopped && dma) {
+ /* fallback to PIO */
+ task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
+ ide_tf_set_cmd(drive, &task, 0);
+ hwif->data_phase = task.data_phase;
+ ide_init_sg_cmd(drive, rq);
+ rc = do_rw_taskfile(drive, &task);
+ }
+
+ return rc;
+}
+
+/*
+ * 268435455 == 137439 MB or 28bit limit
+ * 320173056 == 163929 MB or 48bit addressing
+ * 1073741822 == 549756 MB or 48bit addressing fake drive
+ */
+
+static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
+
+ if (!blk_fs_request(rq)) {
+ blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
+ ide_end_request(drive, 0, 0);
+ return ide_stopped;
+ }
+
+ ledtrig_ide_activity();
+
+ pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+ drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
+ (unsigned long long)block, rq->nr_sectors,
+ (unsigned long)rq->buffer);
+
+ if (hwif->rw_disk)
+ hwif->rw_disk(drive, rq);
+
+ return __ide_do_rw_disk(drive, rq, block);
+}
+
+/*
+ * Queries for true maximum capacity of the drive.
+ * Returns maximum LBA address (> 0) of the drive, 0 if failed.
+ */
+static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
+{
+ ide_task_t args;
+ struct ide_taskfile *tf = &args.tf;
+ u64 addr = 0;
+
+ /* Create IDE/ATA command request structure */
+ memset(&args, 0, sizeof(ide_task_t));
+ if (lba48)
+ tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
+ else
+ tf->command = ATA_CMD_READ_NATIVE_MAX;
+ tf->device = ATA_LBA;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ if (lba48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ /* submit command request */
+ ide_no_data_taskfile(drive, &args);
+
+ /* if OK, compute maximum address value */
+ if ((tf->status & 0x01) == 0)
+ addr = ide_get_lba_addr(tf, lba48) + 1;
+
+ return addr;
+}
+
+/*
+ * Sets maximum virtual LBA address of the drive.
+ * Returns new maximum virtual LBA address (> 0) or 0 on failure.
+ */
+static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
+{
+ ide_task_t args;
+ struct ide_taskfile *tf = &args.tf;
+ u64 addr_set = 0;
+
+ addr_req--;
+ /* Create IDE/ATA command request structure */
+ memset(&args, 0, sizeof(ide_task_t));
+ tf->lbal = (addr_req >> 0) & 0xff;
+ tf->lbam = (addr_req >>= 8) & 0xff;
+ tf->lbah = (addr_req >>= 8) & 0xff;
+ if (lba48) {
+ tf->hob_lbal = (addr_req >>= 8) & 0xff;
+ tf->hob_lbam = (addr_req >>= 8) & 0xff;
+ tf->hob_lbah = (addr_req >>= 8) & 0xff;
+ tf->command = ATA_CMD_SET_MAX_EXT;
+ } else {
+ tf->device = (addr_req >>= 8) & 0x0f;
+ tf->command = ATA_CMD_SET_MAX;
+ }
+ tf->device |= ATA_LBA;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ if (lba48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ /* submit command request */
+ ide_no_data_taskfile(drive, &args);
+ /* if OK, compute maximum address value */
+ if ((tf->status & 0x01) == 0)
+ addr_set = ide_get_lba_addr(tf, lba48) + 1;
+
+ return addr_set;
+}
+
+static unsigned long long sectors_to_MB(unsigned long long n)
+{
+ n <<= 9; /* make it bytes */
+ do_div(n, 1000000); /* make it MB */
+ return n;
+}
+
+/*
+ * Some disks report total number of sectors instead of
+ * maximum sector address. We list them here.
+ */
+static const struct drive_list_entry hpa_list[] = {
+ { "ST340823A", NULL },
+ { "ST320413A", NULL },
+ { "ST310211A", NULL },
+ { NULL, NULL }
+};
+
+static void idedisk_check_hpa(ide_drive_t *drive)
+{
+ unsigned long long capacity, set_max;
+ int lba48 = ata_id_lba48_enabled(drive->id);
+
+ capacity = drive->capacity64;
+
+ set_max = idedisk_read_native_max_address(drive, lba48);
+
+ if (ide_in_drive_list(drive->id, hpa_list)) {
+ /*
+		 * Since we are inclusive with respect to firmware revisions,
+		 * do this extra check and apply the workaround only when needed.
+ */
+ if (set_max == capacity + 1)
+ set_max--;
+ }
+
+ if (set_max <= capacity)
+ return;
+
+ printk(KERN_INFO "%s: Host Protected Area detected.\n"
+ "\tcurrent capacity is %llu sectors (%llu MB)\n"
+ "\tnative capacity is %llu sectors (%llu MB)\n",
+ drive->name,
+ capacity, sectors_to_MB(capacity),
+ set_max, sectors_to_MB(set_max));
+
+ set_max = idedisk_set_max_address(drive, set_max, lba48);
+
+ if (set_max) {
+ drive->capacity64 = set_max;
+ printk(KERN_INFO "%s: Host Protected Area disabled.\n",
+ drive->name);
+ }
+}
+
+static int ide_disk_get_capacity(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ int lba;
+
+ if (ata_id_lba48_enabled(id)) {
+ /* drive speaks 48-bit LBA */
+ lba = 1;
+ drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+ } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
+ /* drive speaks 28-bit LBA */
+ lba = 1;
+ drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+ } else {
+ /* drive speaks boring old 28-bit CHS */
+ lba = 0;
+ drive->capacity64 = drive->cyl * drive->head * drive->sect;
+ }
+
+ if (lba) {
+ drive->dev_flags |= IDE_DFLAG_LBA;
+
+ /*
+ * If this device supports the Host Protected Area feature set,
+ * then we may need to change our opinion about its capacity.
+ */
+ if (ata_id_hpa_enabled(id))
+ idedisk_check_hpa(drive);
+ }
+
+ /* limit drive capacity to 137GB if LBA48 cannot be used */
+ if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
+ drive->capacity64 > 1ULL << 28) {
+ printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
+ "%llu sectors (%llu MB)\n",
+ drive->name, (unsigned long long)drive->capacity64,
+ sectors_to_MB(drive->capacity64));
+ drive->capacity64 = 1ULL << 28;
+ }
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
+ (drive->dev_flags & IDE_DFLAG_LBA48)) {
+ if (drive->capacity64 > 1ULL << 28) {
+ printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
+ " will be used for accessing sectors "
+ "> %u\n", drive->name, 1 << 28);
+ } else
+ drive->dev_flags &= ~IDE_DFLAG_LBA48;
+ }
+
+ return 0;
+}
+
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+{
+ ide_drive_t *drive = q->queuedata;
+ ide_task_t *task = kmalloc(sizeof(*task), GFP_ATOMIC);
+
+ /* FIXME: map struct ide_taskfile on rq->cmd[] */
+ BUG_ON(task == NULL);
+
+ memset(task, 0, sizeof(*task));
+ if (ata_id_flush_ext_enabled(drive->id) &&
+ (drive->capacity64 >= (1UL << 28)))
+ task->tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ task->tf.command = ATA_CMD_FLUSH;
+ task->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
+ IDE_TFLAG_DYN;
+ task->data_phase = TASKFILE_NO_DATA;
+
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->special = task;
+}
+
+ide_devset_get(multcount, mult_count);
+
+/*
+ * This is tightly woven into the driver->do_special machinery and must not
+ * be touched. DON'T do it again until a total personality rewrite is
+ * committed.
+ */
+static int set_multcount(ide_drive_t *drive, int arg)
+{
+ struct request *rq;
+ int error;
+
+ if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
+ return -EINVAL;
+
+ if (drive->special.b.set_multmode)
+ return -EBUSY;
+
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+
+ drive->mult_req = arg;
+ drive->special.b.set_multmode = 1;
+ error = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ return (drive->mult_count == arg) ? 0 : -EIO;
+}
+
+ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
+
+static int set_nowerr(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_NOWERR;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_NOWERR;
+
+ drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+
+ return 0;
+}
+
+static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.feature = feature;
+ task.tf.nsect = nsect;
+ task.tf.command = ATA_CMD_SET_FEATURES;
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ return ide_no_data_taskfile(drive, &task);
+}
+
+static void update_ordered(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ unsigned ordered = QUEUE_ORDERED_NONE;
+ prepare_flush_fn *prep_fn = NULL;
+
+ if (drive->dev_flags & IDE_DFLAG_WCACHE) {
+ unsigned long long capacity;
+ int barrier;
+ /*
+ * We must avoid issuing commands a drive does not
+ * understand or we may crash it. We check flush cache
+ * is supported. We also check we have the LBA48 flush
+ * cache if the drive capacity is too large. By this
+ * time we have trimmed the drive capacity if LBA48 is
+ * not available so we don't need to recheck that.
+ */
+ capacity = ide_gd_capacity(drive);
+ barrier = ata_id_flush_enabled(id) &&
+ (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
+ ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
+ capacity <= (1ULL << 28) ||
+ ata_id_flush_ext_enabled(id));
+
+ printk(KERN_INFO "%s: cache flushes %ssupported\n",
+ drive->name, barrier ? "" : "not ");
+
+ if (barrier) {
+ ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+ prep_fn = idedisk_prepare_flush;
+ }
+ } else
+ ordered = QUEUE_ORDERED_DRAIN;
+
+ blk_queue_ordered(drive->queue, ordered, prep_fn);
+}
+
+ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
+
+static int set_wcache(ide_drive_t *drive, int arg)
+{
+ int err = 1;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ata_id_flush_enabled(drive->id)) {
+ err = ide_do_setfeature(drive,
+ arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
+ if (err == 0) {
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_WCACHE;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_WCACHE;
+ }
+ }
+
+ update_ordered(drive);
+
+ return err;
+}
+
+static int do_idedisk_flushcache(ide_drive_t *drive)
+{
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ if (ata_id_flush_ext_enabled(drive->id))
+ args.tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ args.tf.command = ATA_CMD_FLUSH;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ return ide_no_data_taskfile(drive, &args);
+}
+
+ide_devset_get(acoustic, acoustic);
+
+static int set_acoustic(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 254)
+ return -EINVAL;
+
+ ide_do_setfeature(drive,
+ arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
+
+ drive->acoustic = arg;
+
+ return 0;
+}
+
+ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
+
+/*
+ * drive->addressing:
+ * 0: 28-bit
+ * 1: 48-bit
+ * 2: 48-bit capable doing 28-bit
+ */
+static int set_addressing(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 2)
+ return -EINVAL;
+
+ if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
+ ata_id_lba48_enabled(drive->id) == 0))
+ return -EIO;
+
+ if (arg == 2)
+ arg = 0;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_LBA48;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_LBA48;
+
+ return 0;
+}
+
+ide_ext_devset_rw(acoustic, acoustic);
+ide_ext_devset_rw(address, addressing);
+ide_ext_devset_rw(multcount, multcount);
+ide_ext_devset_rw(wcache, wcache);
+
+ide_ext_devset_rw_sync(nowerr, nowerr);
+
+static int ide_disk_check(ide_drive_t *drive, const char *s)
+{
+ return 1;
+}
+
+static void ide_disk_setup(ide_drive_t *drive)
+{
+ struct ide_disk_obj *idkp = drive->driver_data;
+ struct request_queue *q = drive->queue;
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ unsigned long long capacity;
+
+ ide_proc_register_driver(drive, idkp->driver);
+
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
+ return;
+
+ if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
+ /*
+ * Removable disks (eg. SYQUEST); ignore 'WD' drives
+ */
+ if (m[0] != 'W' || m[1] != 'D')
+ drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
+ }
+
+ (void)set_addressing(drive, 1);
+
+ if (drive->dev_flags & IDE_DFLAG_LBA48) {
+ int max_s = 2048;
+
+ if (max_s > hwif->rqsize)
+ max_s = hwif->rqsize;
+
+ blk_queue_max_sectors(q, max_s);
+ }
+
+ printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
+ q->max_sectors / 2);
+
+ if (ata_id_is_ssd(id))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+
+ /* calculate drive capacity, and select LBA if possible */
+ ide_disk_get_capacity(drive);
+
+ /*
+ * if possible, give fdisk access to more of the drive,
+ * by correcting bios_cyls:
+ */
+ capacity = ide_gd_capacity(drive);
+
+ if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
+ if (ata_id_lba48_enabled(drive->id)) {
+ /* compatibility */
+ drive->bios_sect = 63;
+ drive->bios_head = 255;
+ }
+
+ if (drive->bios_sect && drive->bios_head) {
+ unsigned int cap0 = capacity; /* truncate to 32 bits */
+ unsigned int cylsz, cyl;
+
+ if (cap0 != capacity)
+ drive->bios_cyl = 65535;
+ else {
+ cylsz = drive->bios_sect * drive->bios_head;
+ cyl = cap0 / cylsz;
+ if (cyl > 65535)
+ cyl = 65535;
+ if (cyl > drive->bios_cyl)
+ drive->bios_cyl = cyl;
+ }
+ }
+ }
+ printk(KERN_INFO "%s: %llu sectors (%llu MB)",
+ drive->name, capacity, sectors_to_MB(capacity));
+
+ /* Only print cache size when it was specified */
+ if (id[ATA_ID_BUF_SIZE])
+ printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);
+
+ printk(KERN_CONT ", CHS=%d/%d/%d\n",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ /* write cache enabled? */
+ if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
+ drive->dev_flags |= IDE_DFLAG_WCACHE;
+
+ set_wcache(drive, 1);
+
+ if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
+ (drive->head == 0 || drive->head > 16)) {
+ printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
+ drive->name, drive->head);
+ drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+ } else
+ drive->dev_flags |= IDE_DFLAG_ATTACH;
+}
+
+static void ide_disk_flush(ide_drive_t *drive)
+{
+ if (ata_id_flush_enabled(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
+ return;
+
+ if (do_idedisk_flushcache(drive))
+ printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
+}
+
+static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
+{
+ return 0;
+}
+
+static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
+ int on)
+{
+ ide_task_t task;
+ int ret;
+
+ if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
+ return 0;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ ret = ide_no_data_taskfile(drive, &task);
+
+ if (ret)
+ drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
+
+ return ret;
+}
+
+#ifdef DDE_LINUX
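+/* DDE build: minimal no-op stub standing in for ide-disk_ioctl.c. */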
+int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+#endif
+
+const struct ide_disk_ops ide_ata_disk_ops = {
+ .check = ide_disk_check,
+ .get_capacity = ide_disk_get_capacity,
+ .setup = ide_disk_setup,
+ .flush = ide_disk_flush,
+ .init_media = ide_disk_init_media,
+ .set_doorlock = ide_disk_set_doorlock,
+ .do_request = ide_do_rw_disk,
+ .end_request = ide_end_request,
+ .ioctl = ide_disk_ioctl,
+};
diff --git a/windhoek/ide/ide-disk.h b/windhoek/ide/ide-disk.h
new file mode 100644
index 00000000..326a12ff
--- /dev/null
+++ b/windhoek/ide/ide-disk.h
@@ -0,0 +1,30 @@
+#ifndef __IDE_DISK_H
+#define __IDE_DISK_H
+
+#include "ide-gd.h"
+#include "local.h"
+
+#ifdef CONFIG_IDE_GD_ATA
+/* ide-disk.c */
+extern const struct ide_disk_ops ide_ata_disk_ops;
+ide_decl_devset(address);
+ide_decl_devset(multcount);
+ide_decl_devset(nowerr);
+ide_decl_devset(wcache);
+ide_decl_devset(acoustic);
+
+/* ide-disk_ioctl.c */
+int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
+ unsigned long);
+
+#ifdef CONFIG_IDE_PROC_FS
+/* ide-disk_proc.c */
+extern ide_proc_entry_t ide_disk_proc[];
+extern const struct ide_proc_devset ide_disk_settings[];
+#endif
+#else
+#define ide_disk_proc NULL
+#define ide_disk_settings NULL
+#endif
+
+#endif /* __IDE_DISK_H */
diff --git a/windhoek/ide/ide-dma.c b/windhoek/ide/ide-dma.c
new file mode 100644
index 00000000..059c90bb
--- /dev/null
+++ b/windhoek/ide/ide-dma.c
@@ -0,0 +1,507 @@
+/*
+ * IDE DMA support (including IDE PCI BM-DMA).
+ *
+ * Copyright (C) 1995-1998 Mark Lord
+ * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
+ */
+
+/*
+ * Special Thanks to Mark for his Six years of work.
+ */
+
+/*
+ * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
+ * fixing the problem with the BIOS on some Acer motherboards.
+ *
+ * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
+ * "TX" chipset compatibility and for providing patches for the "TX" chipset.
+ *
+ * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
+ * at generic DMA -- his patches were referred to when preparing this code.
+ *
+ * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
+ * for supplying a Promise UDMA board & WD UDMA drive for this work!
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+static const struct drive_list_entry drive_whitelist[] = {
+ { "Micropolis 2112A" , NULL },
+ { "CONNER CTMA 4000" , NULL },
+ { "CONNER CTT8000-A" , NULL },
+ { "ST34342A" , NULL },
+ { NULL , NULL }
+};
+
+static const struct drive_list_entry drive_blacklist[] = {
+ { "WDC AC11000H" , NULL },
+ { "WDC AC22100H" , NULL },
+ { "WDC AC32500H" , NULL },
+ { "WDC AC33100H" , NULL },
+ { "WDC AC31600H" , NULL },
+ { "WDC AC32100H" , "24.09P07" },
+ { "WDC AC23200L" , "21.10N21" },
+ { "Compaq CRD-8241B" , NULL },
+ { "CRD-8400B" , NULL },
+ { "CRD-8480B", NULL },
+ { "CRD-8482B", NULL },
+ { "CRD-84" , NULL },
+ { "SanDisk SDP3B" , NULL },
+ { "SanDisk SDP3B-64" , NULL },
+ { "SANYO CD-ROM CRD" , NULL },
+ { "HITACHI CDR-8" , NULL },
+ { "HITACHI CDR-8335" , NULL },
+ { "HITACHI CDR-8435" , NULL },
+ { "Toshiba CD-ROM XM-6202B" , NULL },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL },
+ { "CD-532E-A" , NULL },
+ { "E-IDE CD-ROM CR-840", NULL },
+ { "CD-ROM Drive/F5A", NULL },
+ { "WPI CDD-820", NULL },
+ { "SAMSUNG CD-ROM SC-148C", NULL },
+ { "SAMSUNG CD-ROM SC", NULL },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
+ { "_NEC DV5800A", NULL },
+ { "SAMSUNG CD-ROM SN-124", "N001" },
+ { "Seagate STT20000A", NULL },
+ { "CD-ROM CDR_U200", "1.09" },
+ { NULL , NULL }
+
+};
+
+/**
+ * ide_dma_intr - IDE DMA interrupt handler
+ * @drive: the drive the interrupt is for
+ *
+ * Handle an interrupt completing a read/write DMA transfer on an
+ * IDE device
+ */
+
+ide_startstop_t ide_dma_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = 0, dma_stat = 0;
+
+ dma_stat = hwif->dma_ops->dma_end(drive);
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
+ if (!dma_stat) {
+ struct request *rq = hwif->rq;
+
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+ printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+ drive->name, __func__, dma_stat);
+ }
+ return ide_error(drive, "dma_intr", stat);
+}
+EXPORT_SYMBOL_GPL(ide_dma_intr);
+
+int ide_dma_good_drive(ide_drive_t *drive)
+{
+ return ide_in_drive_list(drive->id, drive_whitelist);
+}
+
+/**
+ * ide_build_sglist - map IDE scatter gather for DMA I/O
+ * @drive: the drive to build the DMA table for
+ * @rq: the request holding the sg list
+ *
+ * Perform the DMA mapping magic necessary to access the source or
+ * target buffers of a request via DMA. The lower layers of the
+ * kernel provide the necessary cache management so that we can
+ * operate in a portable fashion.
+ */
+
+int ide_build_sglist(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+ int i;
+
+ ide_map_sg(drive, rq);
+
+ if (rq_data_dir(rq) == READ)
+ hwif->sg_dma_direction = DMA_FROM_DEVICE;
+ else
+ hwif->sg_dma_direction = DMA_TO_DEVICE;
+
+ i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+ if (i) {
+ hwif->orig_sg_nents = hwif->sg_nents;
+ hwif->sg_nents = i;
+ }
+
+ return i;
+}
+EXPORT_SYMBOL_GPL(ide_build_sglist);
+
+/**
+ * ide_destroy_dmatable - clean up DMA mapping
+ * @drive: The drive to unmap
+ *
+ * Teardown mappings after DMA has completed. This must be called
+ * after the completion of each use of ide_build_dmatable and before
+ * the next use of ide_build_dmatable. Failure to do so will cause
+ * an oops as only one mapping can be live for each target at a given
+ * time.
+ */
+
+void ide_destroy_dmatable(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
+ hwif->sg_dma_direction);
+}
+EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
+
+/**
+ * ide_dma_off_quietly - Generic DMA kill
+ * @drive: drive to control
+ *
+ * Turn off the current DMA on this IDE controller.
+ */
+
+void ide_dma_off_quietly(ide_drive_t *drive)
+{
+ drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
+ ide_toggle_bounce(drive, 0);
+
+ drive->hwif->dma_ops->dma_host_set(drive, 0);
+}
+EXPORT_SYMBOL(ide_dma_off_quietly);
+
+/**
+ * ide_dma_off - disable DMA on a device
+ * @drive: drive to disable DMA on
+ *
+ * Disable IDE DMA for a device on this IDE controller.
+ * Inform the user that DMA has been disabled.
+ */
+
+void ide_dma_off(ide_drive_t *drive)
+{
+ printk(KERN_INFO "%s: DMA disabled\n", drive->name);
+ ide_dma_off_quietly(drive);
+}
+EXPORT_SYMBOL(ide_dma_off);
+
+/**
+ * ide_dma_on - Enable DMA on a device
+ * @drive: drive to enable DMA on
+ *
+ * Enable IDE DMA for a device on this IDE controller.
+ */
+
+void ide_dma_on(ide_drive_t *drive)
+{
+ drive->dev_flags |= IDE_DFLAG_USING_DMA;
+ ide_toggle_bounce(drive, 1);
+
+ drive->hwif->dma_ops->dma_host_set(drive, 1);
+}
+
+int __ide_dma_bad_drive(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ int blacklist = ide_in_drive_list(id, drive_blacklist);
+ if (blacklist) {
+ printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
+ drive->name, (char *)&id[ATA_ID_PROD]);
+ return blacklist;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__ide_dma_bad_drive);
+
+static const u8 xfer_mode_bases[] = {
+ XFER_UDMA_0,
+ XFER_MW_DMA_0,
+ XFER_SW_DMA_0,
+};
+
+static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
+{
+ u16 *id = drive->id;
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ unsigned int mask = 0;
+
+ switch (base) {
+ case XFER_UDMA_0:
+ if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
+ break;
+
+ if (port_ops && port_ops->udma_filter)
+ mask = port_ops->udma_filter(drive);
+ else
+ mask = hwif->ultra_mask;
+ mask &= id[ATA_ID_UDMA_MODES];
+
+ /*
+ * avoid false cable warning from eighty_ninty_three()
+ */
+ if (req_mode > XFER_UDMA_2) {
+ if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
+ mask &= 0x07;
+ }
+ break;
+ case XFER_MW_DMA_0:
+ if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
+ break;
+ if (port_ops && port_ops->mdma_filter)
+ mask = port_ops->mdma_filter(drive);
+ else
+ mask = hwif->mwdma_mask;
+ mask &= id[ATA_ID_MWDMA_MODES];
+ break;
+ case XFER_SW_DMA_0:
+ if (id[ATA_ID_FIELD_VALID] & 2) {
+ mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
+ } else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
+ u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
+
+ /*
+ * if the mode is valid convert it to the mask
+ * (the maximum allowed mode is XFER_SW_DMA_2)
+ */
+ if (mode <= 2)
+ mask = ((2 << mode) - 1) & hwif->swdma_mask;
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return mask;
+}
+
+/**
+ * ide_find_dma_mode - compute DMA speed
+ * @drive: IDE device
+ * @req_mode: requested mode
+ *
+ * Checks the drive/host capabilities and finds the speed to use for
+ * the DMA transfer. The speed is then limited by the requested mode.
+ *
+ * Returns 0 if the drive/host combination is incapable of DMA transfers
+ * or if the requested mode is not a DMA mode.
+ */
+
+u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int mask;
+ int x, i;
+ u8 mode = 0;
+
+ if (drive->media != ide_disk) {
+ if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
+ if (req_mode < xfer_mode_bases[i])
+ continue;
+ mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
+ x = fls(mask) - 1;
+ if (x >= 0) {
+ mode = xfer_mode_bases[i] + x;
+ break;
+ }
+ }
+
+ if (hwif->chipset == ide_acorn && mode == 0) {
+ /*
+ * is this correct?
+ */
+ if (ide_dma_good_drive(drive) &&
+ drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
+ mode = XFER_MW_DMA_1;
+ }
+
+ mode = min(mode, req_mode);
+
+ printk(KERN_INFO "%s: %s mode selected\n", drive->name,
+ mode ? ide_xfer_verbose(mode) : "no DMA");
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(ide_find_dma_mode);
+
+static int ide_tune_dma(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 speed;
+
+ if (ata_id_has_dma(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_NODMA))
+ return 0;
+
+ /* consult the list of known "bad" drives */
+ if (__ide_dma_bad_drive(drive))
+ return 0;
+
+ if (ide_id_dma_bug(drive))
+ return 0;
+
+ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ return config_drive_for_dma(drive);
+
+ speed = ide_max_dma_mode(drive);
+
+ if (!speed)
+ return 0;
+
+ if (ide_set_dma_mode(drive, speed))
+ return 0;
+
+ return 1;
+}
+
+static int ide_dma_check(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if (ide_tune_dma(drive))
+ return 0;
+
+ /* TODO: always do PIO fallback */
+ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ return -1;
+
+ ide_set_max_pio(drive);
+
+ return -1;
+}
+
+int ide_id_dma_bug(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ if (id[ATA_ID_FIELD_VALID] & 4) {
+ if ((id[ATA_ID_UDMA_MODES] >> 8) &&
+ (id[ATA_ID_MWDMA_MODES] >> 8))
+ goto err_out;
+ } else if (id[ATA_ID_FIELD_VALID] & 2) {
+ if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+ (id[ATA_ID_SWDMA_MODES] >> 8))
+ goto err_out;
+ }
+ return 0;
+err_out:
+ printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
+ return 1;
+}
+
+int ide_set_dma(ide_drive_t *drive)
+{
+ int rc;
+
+ /*
+ * Force DMAing for the beginning of the check.
+ * Some chipsets appear to do interesting
+ * things, if not checked and cleared.
+ * PARANOIA!!!
+ */
+ ide_dma_off_quietly(drive);
+
+ rc = ide_dma_check(drive);
+ if (rc)
+ return rc;
+
+ ide_dma_on(drive);
+
+ return 0;
+}
+
+void ide_check_dma_crc(ide_drive_t *drive)
+{
+ u8 mode;
+
+ ide_dma_off_quietly(drive);
+ drive->crc_count = 0;
+ mode = drive->current_speed;
+ /*
+ * Don't try non Ultra-DMA modes without iCRC's. Force the
+ * device to PIO and make the user enable SWDMA/MWDMA modes.
+ */
+ if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ mode--;
+ else
+ mode = XFER_PIO_4;
+ ide_set_xfer_rate(drive, mode);
+ if (drive->current_speed >= XFER_SW_DMA_0)
+ ide_dma_on(drive);
+}
+
+void ide_dma_lost_irq(ide_drive_t *drive)
+{
+ printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
+}
+EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
+
+void ide_dma_timeout(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
+
+ if (hwif->dma_ops->dma_test_irq(drive))
+ return;
+
+ ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
+
+ hwif->dma_ops->dma_end(drive);
+}
+EXPORT_SYMBOL_GPL(ide_dma_timeout);
+
+void ide_release_dma_engine(ide_hwif_t *hwif)
+{
+ if (hwif->dmatable_cpu) {
+ int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
+
+ dma_free_coherent(hwif->dev, prd_size,
+ hwif->dmatable_cpu, hwif->dmatable_dma);
+ hwif->dmatable_cpu = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
+
+int ide_allocate_dma_engine(ide_hwif_t *hwif)
+{
+ int prd_size;
+
+ if (hwif->prd_max_nents == 0)
+ hwif->prd_max_nents = PRD_ENTRIES;
+ if (hwif->prd_ent_size == 0)
+ hwif->prd_ent_size = PRD_BYTES;
+
+ prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
+
+ hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+ &hwif->dmatable_dma,
+ GFP_ATOMIC);
+ if (hwif->dmatable_cpu == NULL) {
+ printk(KERN_ERR "%s: unable to allocate PRD table\n",
+ hwif->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
diff --git a/windhoek/ide/ide-floppy.h b/windhoek/ide/ide-floppy.h
new file mode 100644
index 00000000..6dd2beb4
--- /dev/null
+++ b/windhoek/ide/ide-floppy.h
@@ -0,0 +1,39 @@
+#ifndef __IDE_FLOPPY_H
+#define __IDE_FLOPPY_H
+
+#include "ide-gd.h"
+
+#ifdef CONFIG_IDE_GD_ATAPI
+/*
+ * Pages of the SELECT SENSE / MODE SENSE packet commands.
+ * See SFF-8070i spec.
+ */
+#define IDEFLOPPY_CAPABILITIES_PAGE 0x1b
+#define IDEFLOPPY_FLEXIBLE_DISK_PAGE 0x05
+
+/* IOCTLs used in low-level formatting. */
+#define IDEFLOPPY_IOCTL_FORMAT_SUPPORTED 0x4600
+#define IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY 0x4601
+#define IDEFLOPPY_IOCTL_FORMAT_START 0x4602
+#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603
+
+/* ide-floppy.c */
+extern const struct ide_disk_ops ide_atapi_disk_ops;
+void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
+void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
+
+/* ide-floppy_ioctl.c */
+int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+ unsigned int, unsigned long);
+
+#ifdef CONFIG_IDE_PROC_FS
+/* ide-floppy_proc.c */
+extern ide_proc_entry_t ide_floppy_proc[];
+extern const struct ide_proc_devset ide_floppy_settings[];
+#endif
+#else
+#define ide_floppy_proc NULL
+#define ide_floppy_settings NULL
+#endif
+
+#endif /*__IDE_FLOPPY_H */
diff --git a/windhoek/ide/ide-gd.c b/windhoek/ide/ide-gd.c
new file mode 100644
index 00000000..275ed3bf
--- /dev/null
+++ b/windhoek/ide/ide-gd.c
@@ -0,0 +1,409 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define IDE_DISK_MINORS (1 << PARTN_BITS)
+#else
+#define IDE_DISK_MINORS 0
+#endif
+
+#include "ide-disk.h"
+#include "ide-floppy.h"
+#include "local.h"
+
+#define IDE_GD_VERSION "1.18"
+
+/* module parameters */
+static unsigned long debug_mask;
+module_param(debug_mask, ulong, 0644);
+
+static DEFINE_MUTEX(ide_disk_ref_mutex);
+
+static void ide_disk_release(struct device *);
+
+static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = NULL;
+
+ mutex_lock(&ide_disk_ref_mutex);
+ idkp = ide_drv_g(disk, ide_disk_obj);
+ if (idkp) {
+ if (ide_device_get(idkp->drive))
+ idkp = NULL;
+ else
+ get_device(&idkp->dev);
+ }
+ mutex_unlock(&ide_disk_ref_mutex);
+ return idkp;
+}
+
+static void ide_disk_put(struct ide_disk_obj *idkp)
+{
+ ide_drive_t *drive = idkp->drive;
+
+ mutex_lock(&ide_disk_ref_mutex);
+ put_device(&idkp->dev);
+ ide_device_put(drive);
+ mutex_unlock(&ide_disk_ref_mutex);
+}
+
+sector_t ide_gd_capacity(ide_drive_t *drive)
+{
+ return drive->capacity64;
+}
+
+static int ide_gd_probe(ide_drive_t *);
+
+static void ide_gd_remove(ide_drive_t *drive)
+{
+ struct ide_disk_obj *idkp = drive->driver_data;
+ struct gendisk *g = idkp->disk;
+
+ ide_proc_unregister_driver(drive, idkp->driver);
+ device_del(&idkp->dev);
+ del_gendisk(g);
+ drive->disk_ops->flush(drive);
+
+ mutex_lock(&ide_disk_ref_mutex);
+ put_device(&idkp->dev);
+ mutex_unlock(&ide_disk_ref_mutex);
+}
+
+static void ide_disk_release(struct device *dev)
+{
+ struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+ struct gendisk *g = idkp->disk;
+
+ drive->disk_ops = NULL;
+ drive->driver_data = NULL;
+ g->private_data = NULL;
+ put_disk(g);
+ kfree(idkp);
+}
+
+/*
+ * On HPA drives the capacity needs to be reinitialized on resume,
+ * otherwise the disk cannot be used and a hard reset is required.
+ */
+static void ide_gd_resume(ide_drive_t *drive)
+{
+ if (ata_id_hpa_enabled(drive->id))
+ (void)drive->disk_ops->get_capacity(drive);
+}
+
+static void ide_gd_shutdown(ide_drive_t *drive)
+{
+#ifdef CONFIG_ALPHA
+ /* On Alpha, halt(8) doesn't actually turn the machine off,
+ it puts you into the sort of firmware monitor. Typically,
+ it's used to boot another kernel image, so it's not much
+ different from reboot(8). Therefore, we don't need to
+ spin down the disk in this case, especially since Alpha
+ firmware doesn't handle disks in standby mode properly.
+ On the other hand, it's reasonably safe to turn the power
+ off when the shutdown process reaches the firmware prompt,
+ as the firmware initialization takes rather long time -
+ at least 10 seconds, which should be sufficient for
+ the disk to expire its write cache. */
+ if (system_state != SYSTEM_POWER_OFF) {
+#else
+ if (system_state == SYSTEM_RESTART) {
+#endif
+ drive->disk_ops->flush(drive);
+ return;
+ }
+
+ printk(KERN_INFO "Shutdown: %s\n", drive->name);
+
+ drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive)
+{
+ return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc;
+}
+
+static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive)
+{
+ return (drive->media == ide_disk) ? ide_disk_settings
+ : ide_floppy_settings;
+}
+#endif
+
+static ide_startstop_t ide_gd_do_request(ide_drive_t *drive,
+ struct request *rq, sector_t sector)
+{
+ return drive->disk_ops->do_request(drive, rq, sector);
+}
+
+static int ide_gd_end_request(ide_drive_t *drive, int uptodate, int nrsecs)
+{
+ return drive->disk_ops->end_request(drive, uptodate, nrsecs);
+}
+
+static struct ide_driver ide_gd_driver = {
+ .gen_driver = {
+ .owner = THIS_MODULE,
+ .name = "ide-gd",
+ .bus = &ide_bus_type,
+ },
+ .probe = ide_gd_probe,
+ .remove = ide_gd_remove,
+ .resume = ide_gd_resume,
+ .shutdown = ide_gd_shutdown,
+ .version = IDE_GD_VERSION,
+ .do_request = ide_gd_do_request,
+ .end_request = ide_gd_end_request,
+#ifdef CONFIG_IDE_PROC_FS
+ .proc_entries = ide_disk_proc_entries,
+ .proc_devsets = ide_disk_proc_devsets,
+#endif
+};
+
+static int ide_gd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct ide_disk_obj *idkp;
+ ide_drive_t *drive;
+ int ret = 0;
+
+ idkp = ide_disk_get(disk);
+ if (idkp == NULL)
+ return -ENXIO;
+
+ drive = idkp->drive;
+
+ ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+ idkp->openers++;
+
+ if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+ drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+ /* Just in case */
+
+ ret = drive->disk_ops->init_media(drive, disk);
+
+ /*
+ * Allow O_NDELAY to open a drive without a disk, or with an
+ * unreadable disk, so that we can get the format capacity
+ * of the drive or begin the format - Sam
+ */
+ if (ret && (mode & FMODE_NDELAY) == 0) {
+ ret = -EIO;
+ goto out_put_idkp;
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
+ ret = -EROFS;
+ goto out_put_idkp;
+ }
+
+ /*
+ * Ignore the return code from door_lock,
+ * since the open() has already succeeded,
+ * and the door_lock is irrelevant at this point.
+ */
+ drive->disk_ops->set_doorlock(drive, disk, 1);
+ drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
+ check_disk_change(bdev);
+ } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
+ ret = -EBUSY;
+ goto out_put_idkp;
+ }
+ return 0;
+
+out_put_idkp:
+ idkp->openers--;
+ ide_disk_put(idkp);
+ return ret;
+}
+
+static int ide_gd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+ if (idkp->openers == 1)
+ drive->disk_ops->flush(drive);
+
+ if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+ drive->disk_ops->set_doorlock(drive, disk, 0);
+ drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+ }
+
+ idkp->openers--;
+
+ ide_disk_put(idkp);
+
+ return 0;
+}
+
+static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ geo->heads = drive->bios_head;
+ geo->sectors = drive->bios_sect;
+ geo->cylinders = (u16)drive->bios_cyl; /* truncate */
+ return 0;
+}
+
+static int ide_gd_media_changed(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+ int ret;
+
+ /* do not scan partitions twice if this is a removable device */
+ if (drive->dev_flags & IDE_DFLAG_ATTACH) {
+ drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+ return 0;
+ }
+
+ ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED);
+ drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
+
+ return ret;
+}
+
+static int ide_gd_revalidate_disk(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ if (ide_gd_media_changed(disk))
+ drive->disk_ops->get_capacity(drive);
+
+ set_capacity(disk, ide_gd_capacity(drive));
+ return 0;
+}
+
+static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
+}
+
+static struct block_device_operations ide_gd_ops = {
+ .owner = THIS_MODULE,
+ .open = ide_gd_open,
+ .release = ide_gd_release,
+ .locked_ioctl = ide_gd_ioctl,
+ .getgeo = ide_gd_getgeo,
+ .media_changed = ide_gd_media_changed,
+ .revalidate_disk = ide_gd_revalidate_disk
+};
+
+static int ide_gd_probe(ide_drive_t *drive)
+{
+ const struct ide_disk_ops *disk_ops = NULL;
+ struct ide_disk_obj *idkp;
+ struct gendisk *g;
+
+	/* an empty driver_req always matches: strstr(s, "") is non-NULL */
+ if (!strstr("ide-gd", drive->driver_req))
+ goto failed;
+
+#ifdef CONFIG_IDE_GD_ATA
+ if (drive->media == ide_disk)
+ disk_ops = &ide_ata_disk_ops;
+#endif
+#ifdef CONFIG_IDE_GD_ATAPI
+ if (drive->media == ide_floppy)
+ disk_ops = &ide_atapi_disk_ops;
+#endif
+ if (disk_ops == NULL)
+ goto failed;
+
+ if (disk_ops->check(drive, DRV_NAME) == 0) {
+ printk(KERN_ERR PFX "%s: not supported by this driver\n",
+ drive->name);
+ goto failed;
+ }
+
+ idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
+ if (!idkp) {
+ printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
+ drive->name);
+ goto failed;
+ }
+
+ g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
+ if (!g)
+ goto out_free_idkp;
+
+ ide_init_disk(g, drive);
+
+ idkp->dev.parent = &drive->gendev;
+ idkp->dev.release = ide_disk_release;
+	dev_set_name(&idkp->dev, "%s", dev_name(&drive->gendev));
+
+ if (device_register(&idkp->dev))
+ goto out_free_disk;
+
+ idkp->drive = drive;
+ idkp->driver = &ide_gd_driver;
+ idkp->disk = g;
+
+ g->private_data = &idkp->driver;
+
+ drive->driver_data = idkp;
+ drive->debug_mask = debug_mask;
+ drive->disk_ops = disk_ops;
+
+ disk_ops->setup(drive);
+
+ set_capacity(g, ide_gd_capacity(drive));
+
+ g->minors = IDE_DISK_MINORS;
+ g->driverfs_dev = &drive->gendev;
+ g->flags |= GENHD_FL_EXT_DEVT;
+ if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
+ g->flags = GENHD_FL_REMOVABLE;
+ g->fops = &ide_gd_ops;
+ add_disk(g);
+ return 0;
+
+out_free_disk:
+ put_disk(g);
+out_free_idkp:
+ kfree(idkp);
+failed:
+ return -ENODEV;
+}
+
+static int __init ide_gd_init(void)
+{
+ printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n");
+ return driver_register(&ide_gd_driver.gen_driver);
+}
+
+static void __exit ide_gd_exit(void)
+{
+ driver_unregister(&ide_gd_driver.gen_driver);
+}
+
+MODULE_ALIAS("ide:*m-disk*");
+MODULE_ALIAS("ide-disk");
+MODULE_ALIAS("ide:*m-floppy*");
+MODULE_ALIAS("ide-floppy");
+module_init(ide_gd_init);
+module_exit(ide_gd_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
diff --git a/windhoek/ide/ide-gd.h b/windhoek/ide/ide-gd.h
new file mode 100644
index 00000000..b604bdd3
--- /dev/null
+++ b/windhoek/ide/ide-gd.h
@@ -0,0 +1,44 @@
+#ifndef __IDE_GD_H
+#define __IDE_GD_H
+
+#define DRV_NAME "ide-gd"
+#define PFX DRV_NAME ": "
+
+/* define to see debug info */
+#define IDE_GD_DEBUG_LOG 0
+
+#if IDE_GD_DEBUG_LOG
+#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
+#else
+#define ide_debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
+struct ide_disk_obj {
+ ide_drive_t *drive;
+ struct ide_driver *driver;
+ struct gendisk *disk;
+ struct device dev;
+ unsigned int openers; /* protected by BKL for now */
+
+ /* Last failed packet command */
+ struct ide_atapi_pc *failed_pc;
+ /* used for blk_{fs,pc}_request() requests */
+ struct ide_atapi_pc queued_pc;
+
+ /* Last error information */
+ u8 sense_key, asc, ascq;
+
+ int progress_indication;
+
+ /* Device information */
+ /* Current format */
+ int blocks, block_size, bs_factor;
+ /* Last format capacity descriptor */
+ u8 cap_desc[8];
+ /* Copy of the flexible disk page */
+ u8 flexible_disk_page[32];
+};
+
+sector_t ide_gd_capacity(ide_drive_t *);
+
+#endif /* __IDE_GD_H */
diff --git a/windhoek/ide/ide-generic.c b/windhoek/ide/ide-generic.c
new file mode 100644
index 00000000..d9636b67
--- /dev/null
+++ b/windhoek/ide/ide-generic.c
@@ -0,0 +1,206 @@
+/*
+ * generic/default IDE host driver
+ *
+ * Copyright (C) 2004, 2008 Bartlomiej Zolnierkiewicz
+ * This code was split off from ide.c. See it for original copyrights.
+ *
+ * May be copied or modified under the terms of the GNU General Public License.
+ */
+
+/*
+ * For special cases new interfaces may be added using sysfs, i.e.
+ *
+ * echo -n "0x168:0x36e:10" > /sys/class/ide_generic/add
+ *
+ * will add an interface using I/O ports 0x168-0x16f/0x36e and IRQ 10.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ide.h>
+#include <linux/pci_ids.h>
+
+/* FIXME: convert m32r to use ide_platform host driver */
+#ifdef CONFIG_M32R
+#include <asm/m32r.h>
+#endif
+
+#include "local.h"
+
+#define DRV_NAME "ide_generic"
+
+static int probe_mask;
+module_param(probe_mask, int, 0);
+MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
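+/*
+ * Each set bit in probe_mask enables probing of the corresponding entry
+ * in legacy_bases[] below.
+ */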
+
+static ssize_t store_add(struct class *cls, const char *buf, size_t n)
+{
+ unsigned int base, ctl;
+ int irq, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+
+ if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
+ return -EINVAL;
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, base, ctl);
+ hw.irq = irq;
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, NULL);
+ if (rc)
+ return rc;
+
+ return n;
+}
+
+static struct class_attribute ide_generic_class_attrs[] = {
+ __ATTR(add, S_IWUSR, NULL, store_add),
+ __ATTR_NULL
+};
+
+static void ide_generic_class_release(struct class *cls)
+{
+ kfree(cls);
+}
+
+static int __init ide_generic_sysfs_init(void)
+{
+ struct class *cls;
+ int rc;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls)
+ return -ENOMEM;
+
+ cls->name = DRV_NAME;
+ cls->owner = THIS_MODULE;
+ cls->class_release = ide_generic_class_release;
+ cls->class_attrs = ide_generic_class_attrs;
+
+ rc = class_register(cls);
+ if (rc) {
+ kfree(cls);
+ return rc;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) \
+ || defined(CONFIG_PLAT_OPSPUT)
+static const u16 legacy_bases[] = { 0x1f0 };
+static const int legacy_irqs[] = { PLD_IRQ_CFIREQ };
+#elif defined(CONFIG_PLAT_MAPPI3)
+static const u16 legacy_bases[] = { 0x1f0, 0x170 };
+static const int legacy_irqs[] = { PLD_IRQ_CFIREQ, PLD_IRQ_IDEIREQ };
+#elif defined(CONFIG_ALPHA)
+static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
+static const int legacy_irqs[] = { 14, 15, 11, 10 };
+#else
+static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
+#endif
+
+static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
+{
+ struct pci_dev *p = NULL;
+ u16 val;
+
+ for_each_pci_dev(p) {
+
+ if (pci_resource_start(p, 0) == 0x1f0)
+ *primary = 1;
+ if (pci_resource_start(p, 2) == 0x170)
+ *secondary = 1;
+
+ /* Cyrix CS55{1,2}0 pre SFF MWDMA ATA on the bridge */
+ if (p->vendor == PCI_VENDOR_ID_CYRIX &&
+ (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
+ p->device == PCI_DEVICE_ID_CYRIX_5520))
+ *primary = *secondary = 1;
+
+ /* Intel MPIIX - PIO ATA on non PCI side of bridge */
+ if (p->vendor == PCI_VENDOR_ID_INTEL &&
+ p->device == PCI_DEVICE_ID_INTEL_82371MX) {
+
+ pci_read_config_word(p, 0x6C, &val);
+ if (val & 0x8000) {
+ /* ATA port enabled */
+ if (val & 0x4000)
+ *secondary = 1;
+ else
+ *primary = 1;
+ }
+ }
+ }
+}
+
+static int __init ide_generic_init(void)
+{
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ unsigned long io_addr;
+ int i, rc = 0, primary = 0, secondary = 0;
+
+ ide_generic_check_pci_legacy_iobases(&primary, &secondary);
+
+ if (!probe_mask) {
+ printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" "
+ "module parameter for probing all legacy ISA IDE ports\n");
+
+ if (primary == 0)
+ probe_mask |= 0x1;
+
+ if (secondary == 0)
+ probe_mask |= 0x2;
+ } else
+ printk(KERN_INFO DRV_NAME ": enforcing probing of I/O ports "
+ "upon user request\n");
+
+ for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
+ io_addr = legacy_bases[i];
+
+ if ((probe_mask & (1 << i)) && io_addr) {
+ if (!request_region(io_addr, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr, io_addr + 7);
+ continue;
+ }
+
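+			/* the control/altstatus register lives at base + 0x206
+			 * (0x3f6 for the primary port at 0x1f0) */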
+ if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr + 0x206);
+ release_region(io_addr, 8);
+ continue;
+ }
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
+#ifdef CONFIG_IA64
+ hw.irq = isa_irq_to_vector(legacy_irqs[i]);
+#else
+ hw.irq = legacy_irqs[i];
+#endif
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, NULL);
+ if (rc) {
+ release_region(io_addr + 0x206, 1);
+ release_region(io_addr, 8);
+ }
+ }
+ }
+
+ if (ide_generic_sysfs_init())
+ printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
+ "class\n");
+
+ return rc;
+}
+
+module_init(ide_generic_init);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-io.c b/windhoek/ide/ide-io.c
new file mode 100644
index 00000000..a9a6c208
--- /dev/null
+++ b/windhoek/ide/ide-io.c
@@ -0,0 +1,1228 @@
+/*
+ * IDE I/O functions
+ *
+ * Basic PIO and command management functionality.
+ *
+ * This code was split off from ide.c. See ide.c for history and original
+ * copyrights.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ */
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/reboot.h>
+#include <linux/cdrom.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/scatterlist.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+static int __ide_end_request(ide_drive_t *drive, struct request *rq,
+ int uptodate, unsigned int nr_bytes, int dequeue)
+{
+ int ret = 1;
+ int error = 0;
+
+ if (uptodate <= 0)
+ error = uptodate ? uptodate : -EIO;
+
+ /*
+ * if failfast is set on a request, override number of sectors and
+ * complete the whole request right now
+ */
+ if (blk_noretry_request(rq) && error)
+ nr_bytes = rq->hard_nr_sectors << 9;
+
+ if (!blk_fs_request(rq) && error && !rq->errors)
+ rq->errors = -EIO;
+
+ /*
+ * decide whether to reenable DMA -- 3 is a random magic for now,
+ * if we DMA timeout more than 3 times, just stay in PIO
+ */
+ if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
+ drive->retry_pio <= 3) {
+ drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
+ ide_dma_on(drive);
+ }
+
+ if (!blk_end_request(rq, error, nr_bytes))
+ ret = 0;
+
+ if (ret == 0 && dequeue)
+ drive->hwif->rq = NULL;
+
+ return ret;
+}
+
+/**
+ * ide_end_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @uptodate:	completion status; a value <= 0 indicates an error (0 maps to -EIO)
+ * @nr_sectors: number of sectors completed
+ *
+ * This is our end_request wrapper function. We complete the I/O,
+ * update the random number input and dequeue the request, which,
+ * if it was tagged, may be out of order.
+ */
+
+int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
+{
+ unsigned int nr_bytes = nr_sectors << 9;
+ struct request *rq = drive->hwif->rq;
+
+ if (!nr_bytes) {
+ if (blk_pc_request(rq))
+ nr_bytes = rq->data_len;
+ else
+ nr_bytes = rq->hard_cur_sectors << 9;
+ }
+
+ return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
+}
+EXPORT_SYMBOL(ide_end_request);
+
+/**
+ * ide_end_dequeued_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @uptodate:	completion status; a value <= 0 indicates an error (0 maps to -EIO)
+ * @nr_sectors: number of sectors completed
+ *
+ * Complete an I/O that is no longer on the request queue. This
+ * typically occurs when we pull the request and issue a REQUEST_SENSE.
+ * We must still finish the old request but we must not tamper with the
+ * queue in the meantime.
+ *
+ * NOTE: This path does not handle barrier, but barrier is not supported
+ * on ide-cd anyway.
+ */
+
+int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+ int uptodate, int nr_sectors)
+{
+ BUG_ON(!blk_rq_started(rq));
+
+ return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
+}
+EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
+
+/**
+ * ide_end_drive_cmd - end an explicit drive command
+ * @drive: drive the command was issued to
+ * @stat: status bits
+ * @err: error bits
+ *
+ * Clean up after success/failure of an explicit drive command.
+ * These get thrown onto the queue so they are synchronized with
+ * real I/O operations on the drive.
+ *
+ * In LBA48 mode we have to read the register set twice to get
+ * all the extra information out.
+ */
+
+void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ ide_task_t *task = (ide_task_t *)rq->special;
+
+ if (task) {
+ struct ide_taskfile *tf = &task->tf;
+
+ tf->error = err;
+ tf->status = stat;
+
+ drive->hwif->tp_ops->tf_read(drive, task);
+
+ if (task->tf_flags & IDE_TFLAG_DYN)
+ kfree(task);
+ }
+ } else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->data;
+
+ ide_complete_power_step(drive, rq);
+ if (pm->pm_step == IDE_PM_COMPLETED)
+ ide_complete_pm_request(drive, rq);
+ return;
+ }
+
+ hwif->rq = NULL;
+
+ rq->errors = err;
+
+ if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
+ blk_rq_bytes(rq))))
+ BUG();
+}
+EXPORT_SYMBOL(ide_end_drive_cmd);
+
+static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
+{
+ if (rq->rq_disk) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 0, 0);
+ } else
+ ide_end_request(drive, 0, 0);
+}
+
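+/*
+ * Classify an ATA error and decide whether to retry, recalibrate, reset
+ * the port or give up on the request entirely.
+ */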
+static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else if (stat & ATA_ERR) {
+ /* err has different meaning on cdrom and tape */
+ if (err == ATA_ABORTED) {
+ if ((drive->dev_flags & IDE_DFLAG_LBA) &&
+ /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
+ hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
+ return ide_stopped;
+ } else if ((err & BAD_CRC) == BAD_CRC) {
+ /* UDMA crc error, just retry the operation */
+ drive->crc_count++;
+ } else if (err & (ATA_BBK | ATA_UNC)) {
+ /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ } else if (err & ATA_TRK0NF) {
+ /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ }
+ }
+
+ if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
+ (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+ int nsect = drive->mult_count ? drive->mult_count : 1;
+
+ ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+ }
+
+ if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ rq->errors |= ERROR_RESET;
+
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+
+ if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+
+ ++rq->errors;
+
+ return ide_stopped;
+}
+
+static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ /* add decoding error stuff */
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ /* force an abort */
+ hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
+
+ if (rq->errors >= ERROR_MAX) {
+ ide_kill_rq(drive, rq);
+ } else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+ ++rq->errors;
+ }
+
+ return ide_stopped;
+}
+
+static ide_startstop_t
+__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ if (drive->media == ide_disk)
+ return ide_ata_error(drive, rq, stat, err);
+ return ide_atapi_error(drive, rq, stat, err);
+}
+
+/**
+ * ide_error - handle an error on the IDE
+ * @drive: drive the error occurred on
+ * @msg: message to report
+ * @stat: status bits
+ *
+ * ide_error() takes action based on the error returned by the drive.
+ * For normal I/O that may well include retries. We deal with
+ * both new-style (taskfile) and old-style command handling here.
+ * In the case of taskfile command handling there is still work
+ * left to do.
+ */
+
+ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
+{
+ struct request *rq;
+ u8 err;
+
+ err = ide_dump_status(drive, msg, stat);
+
+ rq = drive->hwif->rq;
+ if (rq == NULL)
+ return ide_stopped;
+
+ /* retry only "normal" I/O: */
+ if (!blk_fs_request(rq)) {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return ide_stopped;
+ }
+
+ return __ide_error(drive, rq, stat, err);
+}
+EXPORT_SYMBOL_GPL(ide_error);
+
+static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->sect;
+ tf->lbal = drive->sect;
+ tf->lbam = drive->cyl;
+ tf->lbah = drive->cyl >> 8;
+ tf->device = (drive->head - 1) | drive->select;
+ tf->command = ATA_CMD_INIT_DEV_PARAMS;
+}
+
+static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->sect;
+ tf->command = ATA_CMD_RESTORE;
+}
+
+static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->mult_req;
+ tf->command = ATA_CMD_SET_MULTI;
+}
+
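+/*
+ * Build and issue the taskfile for whichever "special" flag is pending
+ * (set_geometry, recalibrate or set_multmode); one flag is handled per call.
+ */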
+static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ args.data_phase = TASKFILE_NO_DATA;
+
+ if (s->b.set_geometry) {
+ s->b.set_geometry = 0;
+ ide_tf_set_specify_cmd(drive, &args.tf);
+ } else if (s->b.recalibrate) {
+ s->b.recalibrate = 0;
+ ide_tf_set_restore_cmd(drive, &args.tf);
+ } else if (s->b.set_multmode) {
+ s->b.set_multmode = 0;
+ ide_tf_set_setmult_cmd(drive, &args.tf);
+ } else if (s->all) {
+ int special = s->all;
+ s->all = 0;
+ printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
+ return ide_stopped;
+ }
+
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
+ IDE_TFLAG_CUSTOM_HANDLER;
+
+ do_rw_taskfile(drive, &args);
+
+ return ide_started;
+}
+
+/**
+ * do_special - issue some special commands
+ * @drive: drive the command is for
+ *
+ * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
+ * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
+ *
+ * It used to do much more, but has been scaled back.
+ */
+
+static ide_startstop_t do_special (ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+
+#ifdef DEBUG
+ printk("%s: do_special: 0x%02x\n", drive->name, s->all);
+#endif
+ if (drive->media == ide_disk)
+ return ide_disk_special(drive);
+
+ s->all = 0;
+ drive->mult_req = 0;
+ return ide_stopped;
+}
+
+void ide_map_sg(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
+ hwif->sg_nents = 1;
+ } else if (!rq->bio) {
+ sg_init_one(sg, rq->data, rq->data_len);
+ hwif->sg_nents = 1;
+ } else {
+ hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ }
+}
+
+EXPORT_SYMBOL_GPL(ide_map_sg);
+
+void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ hwif->nsect = hwif->nleft = rq->nr_sectors;
+ hwif->cursg_ofs = 0;
+ hwif->cursg = NULL;
+}
+
+EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
+
+/**
+ * execute_drive_command - issue special drive command
+ * @drive: the drive to issue the command on
+ * @rq: the request structure holding the command
+ *
+ * execute_drive_cmd() issues a special drive command, usually
+ * initiated by ioctl() from the external hdparm program. The
+ * command can be a drive command, drive task or taskfile
+ * operation. Weirdly, you can call it with NULL to wait for
+ * all commands to finish. Don't do this, as that behaviour is
+ * due to change.
+ */
+
+static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+ struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t *task = rq->special;
+
+ if (task) {
+ hwif->data_phase = task->data_phase;
+
+ switch (hwif->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ case TASKFILE_OUT:
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_IN:
+ ide_init_sg_cmd(drive, rq);
+ ide_map_sg(drive, rq);
+ default:
+ break;
+ }
+
+ return do_rw_taskfile(drive, task);
+ }
+
+ /*
+ * NULL is actually a valid way of waiting for
+ * all current requests to be flushed from the queue.
+ */
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD (null)\n", drive->name);
+#endif
+ ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
+ ide_read_error(drive));
+
+ return ide_stopped;
+}
+
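+/*
+ * Apply a device setting.  Settings flagged DS_SYNC are wrapped in a
+ * REQ_DEVSET_EXEC special request and executed synchronously from the
+ * request queue; everything else is applied directly via ->set().
+ */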
+int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
+ int arg)
+{
+ struct request_queue *q = drive->queue;
+ struct request *rq;
+ int ret = 0;
+
+ if (!(setting->flags & DS_SYNC))
+ return setting->set(drive, arg);
+
+ rq = blk_get_request(q, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ rq->cmd_len = 5;
+ rq->cmd[0] = REQ_DEVSET_EXEC;
+ *(int *)&rq->cmd[1] = arg;
+ rq->special = setting->set;
+
+ if (blk_execute_rq(q, NULL, rq, 0))
+ ret = rq->errors;
+ blk_put_request(rq);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_devset_execute);
+
+static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
+{
+ u8 cmd = rq->cmd[0];
+
+ if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+
+ memset(&task, 0, sizeof(task));
+ if (cmd == REQ_PARK_HEADS) {
+ drive->sleep = *(unsigned long *)rq->special;
+ drive->dev_flags |= IDE_DFLAG_SLEEPING;
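+			/*
+			 * IDLE IMMEDIATE with the UNLOAD feature: feature 0x44
+			 * plus the 0x554E4C signature in the LBA registers asks
+			 * the drive to park its heads.
+			 */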
+ tf->command = ATA_CMD_IDLEIMMEDIATE;
+ tf->feature = 0x44;
+ tf->lbal = 0x4c;
+ tf->lbam = 0x4e;
+ tf->lbah = 0x55;
+ task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
+ } else /* cmd == REQ_UNPARK_HEADS */
+ tf->command = ATA_CMD_CHK_POWER;
+
+ task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ task.rq = rq;
+ drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
+ return do_rw_taskfile(drive, &task);
+ }
+
+ switch (cmd) {
+ case REQ_DEVSET_EXEC:
+ {
+ int err, (*setfunc)(ide_drive_t *, int) = rq->special;
+
+ err = setfunc(drive, *(int *)&rq->cmd[1]);
+ if (err)
+ rq->errors = err;
+ else
+ err = 1;
+ ide_end_request(drive, err, 0);
+ return ide_stopped;
+ }
+ case REQ_DRIVE_RESET:
+ return ide_do_reset(drive);
+ default:
+ blk_dump_rq_flags(rq, "ide_special_rq - bad request");
+ ide_end_request(drive, 0, 0);
+ return ide_stopped;
+ }
+}
+
+/**
+ * start_request - start of I/O and command issuing for IDE
+ * @drive: drive the request is for
+ * @rq: request to start
+ *
+ * start_request() initiates handling of a new I/O request. It
+ * accepts commands and I/O (read/write) requests.
+ *
+ * FIXME: this function needs a rename
+ */
+
+static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+{
+ ide_startstop_t startstop;
+
+ BUG_ON(!blk_rq_started(rq));
+
+#ifdef DEBUG
+ printk("%s: start_request: current=0x%08lx\n",
+ drive->hwif->name, (unsigned long) rq);
+#endif
+
+ /* bail early if we've exceeded max_failures */
+ if (drive->max_failures && (drive->failures > drive->max_failures)) {
+ rq->cmd_flags |= REQ_FAILED;
+ goto kill_rq;
+ }
+
+ if (blk_pm_request(rq))
+ ide_check_pm_state(drive, rq);
+
+ SELECT_DRIVE(drive);
+ if (ide_wait_stat(&startstop, drive, drive->ready_stat,
+ ATA_BUSY | ATA_DRQ, WAIT_READY)) {
+ printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
+ return startstop;
+ }
+ if (!drive->special.all) {
+ struct ide_driver *drv;
+
+ /*
+ * We reset the drive so we need to issue a SETFEATURES.
+ * Do it _after_ do_special() restored device parameters.
+ */
+ if (drive->current_speed == 0xff)
+ ide_config_drive_speed(drive, drive->desired_speed);
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ return execute_drive_cmd(drive, rq);
+ else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->data;
+#ifdef DEBUG_PM
+ printk("%s: start_power_step(step: %d)\n",
+ drive->name, pm->pm_step);
+#endif
+ startstop = ide_start_power_step(drive, rq);
+ if (startstop == ide_stopped &&
+ pm->pm_step == IDE_PM_COMPLETED)
+ ide_complete_pm_request(drive, rq);
+ return startstop;
+ } else if (!rq->rq_disk && blk_special_request(rq))
+ /*
+ * TODO: Once all ULDs have been modified to
+ * check for specific op codes rather than
+ * blindly accepting any special request, the
+ * check for ->rq_disk above may be replaced
+ * by a more suitable mechanism or even
+ * dropped entirely.
+ */
+ return ide_special_rq(drive, rq);
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+
+ return drv->do_request(drive, rq, rq->sector);
+ }
+ return do_special(drive);
+kill_rq:
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+}
+
+/**
+ * ide_stall_queue - pause an IDE device
+ * @drive: drive to stall
+ * @timeout: time to stall for (jiffies)
+ *
+ * ide_stall_queue() can be used by a drive to give excess bandwidth back
+ * to the port by sleeping for timeout jiffies.
+ */
+
+void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
+{
+ if (timeout > WAIT_WORSTCASE)
+ timeout = WAIT_WORSTCASE;
+ drive->sleep = timeout + jiffies;
+ drive->dev_flags |= IDE_DFLAG_SLEEPING;
+}
+EXPORT_SYMBOL(ide_stall_queue);
+
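+/* hwif->busy serializes command issue on a port; callers hold hwif->lock */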
+static inline int ide_lock_port(ide_hwif_t *hwif)
+{
+ if (hwif->busy)
+ return 1;
+
+ hwif->busy = 1;
+
+ return 0;
+}
+
+static inline void ide_unlock_port(ide_hwif_t *hwif)
+{
+ hwif->busy = 0;
+}
+
+static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
+{
+ int rc = 0;
+
+ if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+ rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
+ if (rc == 0) {
+ /* for atari only */
+ ide_get_lock(ide_intr, hwif);
+ }
+ }
+ return rc;
+}
+
+static inline void ide_unlock_host(struct ide_host *host)
+{
+ if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+ /* for atari only */
+ ide_release_lock();
+ clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
+ }
+}
+
+/*
+ * Issue a new request to a device.
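+ *
+ * Called by the block layer with q->queue_lock held and IRQs disabled;
+ * the lock is dropped while the port is driven under hwif->lock and
+ * re-acquired before returning.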
+ */
+void do_ide_request(struct request_queue *q)
+{
+ ide_drive_t *drive = q->queuedata;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_host *host = hwif->host;
+ struct request *rq = NULL;
+ ide_startstop_t startstop;
+
+ /*
+ * drive is doing pre-flush, ordered write, post-flush sequence. even
+ * though that is 3 requests, it must be seen as a single transaction.
+ * we must not preempt this drive until that is complete
+ */
+ if (blk_queue_flushing(q))
+ /*
+ * small race where queue could get replugged during
+ * the 3-request flush cycle, just yank the plug since
+ * we want it to finish asap
+ */
+ blk_remove_plug(q);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (ide_lock_host(host, hwif))
+ goto plug_device_2;
+
+ spin_lock_irq(&hwif->lock);
+
+ if (!ide_lock_port(hwif)) {
+ ide_hwif_t *prev_port;
+repeat:
+ prev_port = hwif->host->cur_port;
+ hwif->rq = NULL;
+
+ if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
+ if (time_before(drive->sleep, jiffies)) {
+ ide_unlock_port(hwif);
+ goto plug_device;
+ }
+ }
+
+ if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
+ hwif != prev_port) {
+ /*
+ * set nIEN for previous port, drives in the
+ * quirk_list may not like intr setups/cleanups
+ */
+ if (prev_port && prev_port->cur_dev->quirk_list == 0)
+ prev_port->tp_ops->set_irq(prev_port, 0);
+
+ hwif->host->cur_port = hwif;
+ }
+ hwif->cur_dev = drive;
+ drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
+
+ spin_unlock_irq(&hwif->lock);
+ spin_lock_irq(q->queue_lock);
+ /*
+ * we know that the queue isn't empty, but this can happen
+ * if the q->prep_rq_fn() decides to kill a request
+ */
+ rq = elv_next_request(drive->queue);
+ spin_unlock_irq(q->queue_lock);
+ spin_lock_irq(&hwif->lock);
+
+ if (!rq) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
+
+ /*
+ * Sanity: don't accept a request that isn't a PM request
+ * if we are currently power managed. This is very important as
+ * blk_stop_queue() doesn't prevent the elv_next_request()
+	 * above from returning whatever is in the queue. Since we call
+ * ide_do_request() ourselves, we end up taking requests while
+ * the queue is blocked...
+ *
+ * We let requests forced at head of queue with ide-preempt
+ * though. I hope that doesn't happen too much, hopefully not
+ * unless the subdriver triggers such a thing in its own PM
+ * state machine.
+ */
+ if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
+ blk_pm_request(rq) == 0 &&
+ (rq->cmd_flags & REQ_PREEMPT) == 0) {
+ /* there should be no pending command at this point */
+ ide_unlock_port(hwif);
+ goto plug_device;
+ }
+
+ hwif->rq = rq;
+
+ spin_unlock_irq(&hwif->lock);
+ startstop = start_request(drive, rq);
+ spin_lock_irq(&hwif->lock);
+
+ if (startstop == ide_stopped)
+ goto repeat;
+ } else
+ goto plug_device;
+out:
+ spin_unlock_irq(&hwif->lock);
+ if (rq == NULL)
+ ide_unlock_host(host);
+ spin_lock_irq(q->queue_lock);
+ return;
+
+plug_device:
+ spin_unlock_irq(&hwif->lock);
+ ide_unlock_host(host);
+plug_device_2:
+ spin_lock_irq(q->queue_lock);
+
+ if (!elv_queue_empty(q))
+ blk_plug_device(q);
+}
+
+/*
+ * Un-busy the port etc., and clear any pending DMA status. We want to
+ * retry the current request in PIO mode instead of risking tossing it
+ * all away.
+ */
+static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ ide_startstop_t ret = ide_stopped;
+
+ /*
+ * end current dma transaction
+ */
+
+ if (error < 0) {
+ printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
+ (void)hwif->dma_ops->dma_end(drive);
+ ret = ide_error(drive, "dma timeout error",
+ hwif->tp_ops->read_status(hwif));
+ } else {
+ printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
+ hwif->dma_ops->dma_timeout(drive);
+ }
+
+ /*
+ * disable dma for now, but remember that we did so because of
+ * a timeout -- we'll reenable after we finish this next request
+ * (or rather the first chunk of it) in pio.
+ */
+ drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
+ drive->retry_pio++;
+ ide_dma_off_quietly(drive);
+
+ /*
+ * un-busy drive etc and make sure request is sane
+ */
+
+ rq = hwif->rq;
+ if (!rq)
+ goto out;
+
+ hwif->rq = NULL;
+
+ rq->errors = 0;
+
+ if (!rq->bio)
+ goto out;
+
+ rq->sector = rq->bio->bi_sector;
+ rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
+ rq->hard_cur_sectors = rq->current_nr_sectors;
+ rq->buffer = bio_data(rq->bio);
+out:
+ return ret;
+}
+
+static void ide_plug_device(ide_drive_t *drive)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!elv_queue_empty(q))
+ blk_plug_device(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * ide_timer_expiry - handle lack of an IDE interrupt
+ * @data: timer callback magic (hwif)
+ *
+ * An IDE command has timed out before the expected drive return
+ * occurred. At this point we attempt to clean up the current
+ * mess. If the current handler includes an expiry handler then
+ * we invoke the expiry handler, and providing it is happy the
+ * work is done. If that fails we apply generic recovery rules
+ * invoking the handler and checking the drive DMA status. We
+ * have an excessively incestuous relationship with the DMA
+ * logic that wants cleaning up.
+ */
+
+void ide_timer_expiry (unsigned long data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *)data;
+ ide_drive_t *uninitialized_var(drive);
+ ide_handler_t *handler;
+ unsigned long flags;
+ int wait = -1;
+ int plug_device = 0;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ handler = hwif->handler;
+
+ if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
+ /*
+ * Either a marginal timeout occurred
+ * (got the interrupt just as timer expired),
+ * or we were "sleeping" to give other devices a chance.
+ * Either way, we don't really want to complain about anything.
+ */
+ } else {
+ ide_expiry_t *expiry = hwif->expiry;
+ ide_startstop_t startstop = ide_stopped;
+
+ drive = hwif->cur_dev;
+
+ if (expiry) {
+ wait = expiry(drive);
+ if (wait > 0) { /* continue */
+ /* reset timer */
+ hwif->timer.expires = jiffies + wait;
+ hwif->req_gen_timer = hwif->req_gen;
+ add_timer(&hwif->timer);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return;
+ }
+ }
+ hwif->handler = NULL;
+ /*
+ * We need to simulate a real interrupt when invoking
+ * the handler() function, which means we need to
+ * globally mask the specific IRQ:
+ */
+ spin_unlock(&hwif->lock);
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+ local_irq_disable();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+ if (drive->waiting_for_dma)
+ hwif->dma_ops->dma_lost_irq(drive);
+ (void)ide_ack_intr(hwif);
+ printk(KERN_WARNING "%s: lost interrupt\n",
+ drive->name);
+ startstop = handler(drive);
+ } else {
+ if (drive->waiting_for_dma)
+ startstop = ide_dma_timeout_retry(drive, wait);
+ else
+ startstop = ide_error(drive, "irq timeout",
+ hwif->tp_ops->read_status(hwif));
+ }
+ spin_lock_irq(&hwif->lock);
+ enable_irq(hwif->irq);
+ if (startstop == ide_stopped) {
+ ide_unlock_port(hwif);
+ plug_device = 1;
+ }
+ }
+ spin_unlock_irqrestore(&hwif->lock, flags);
+
+ if (plug_device) {
+ ide_unlock_host(hwif->host);
+ ide_plug_device(drive);
+ }
+}
+
+/**
+ * unexpected_intr - handle an unexpected IDE interrupt
+ * @irq: interrupt line
+ * @hwif: port being processed
+ *
+ * There's nothing really useful we can do with an unexpected interrupt,
+ * other than reading the status register (to clear it), and logging it.
+ * There should be no way that an irq can happen before we're ready for it,
+ * so we needn't worry much about losing an "important" interrupt here.
+ *
+ * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
+ * the drive enters "idle", "standby", or "sleep" mode, so if the status
+ * looks "good", we just ignore the interrupt completely.
+ *
+ * This routine assumes __cli() is in effect when called.
+ *
+ * If an unexpected interrupt happens on irq15 while we are handling irq14
+ * and if the two interfaces are "serialized" (CMD640), then it looks like
+ * we could screw up by interfering with a new request being set up for
+ * irq15.
+ *
+ * In reality, this is a non-issue. The new command is not sent unless
+ * the drive is ready to accept one, in which case we know the drive is
+ * not trying to interrupt us. And ide_set_handler() is always invoked
+ * before completing the issuance of any new drive command, so we will not
+ * be accidentally invoked as a result of any valid command completion
+ * interrupt.
+ */
+
+static void unexpected_intr(int irq, ide_hwif_t *hwif)
+{
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
+ /* Try to not flood the console with msgs */
+ static unsigned long last_msgtime, count;
+ ++count;
+
+ if (time_after(jiffies, last_msgtime + HZ)) {
+ last_msgtime = jiffies;
+ printk(KERN_ERR "%s: unexpected interrupt, "
+ "status=0x%02x, count=%ld\n",
+ hwif->name, stat, count);
+ }
+ }
+}
+
+/**
+ * ide_intr - default IDE interrupt handler
+ * @irq: interrupt number
+ * @dev_id: hwif
+ *
+ * This is the default IRQ handler for the IDE layer. You should
+ * not need to override it. If you do be aware it is subtle in
+ * places
+ *
+ * hwif is the interface in the group currently performing
+ * a command. hwif->cur_dev is the drive and hwif->handler is
+ * the IRQ handler to call. As we issue a command the handlers
+ * step through multiple states, reassigning the handler to the
+ * next step in the process. Unlike a smart SCSI controller IDE
+ * expects the main processor to sequence the various transfer
+ * stages. We also manage a poll timer to catch up with most
+ * timeout situations. There are still a few where the handlers
+ * don't ever decide to give up.
+ *
+ * The handler eventually returns ide_stopped to indicate the
+ * request completed. At this point we issue the next request
+ * on the port and the process begins again.
+ */
+
+irqreturn_t ide_intr (int irq, void *dev_id)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
+ ide_drive_t *uninitialized_var(drive);
+ ide_handler_t *handler;
+ unsigned long flags;
+ ide_startstop_t startstop;
+ irqreturn_t irq_ret = IRQ_NONE;
+ int plug_device = 0;
+
+ if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
+ if (hwif != hwif->host->cur_port)
+ goto out_early;
+ }
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ if (!ide_ack_intr(hwif))
+ goto out;
+
+ handler = hwif->handler;
+
+ if (handler == NULL || hwif->polling) {
+ /*
+ * Not expecting an interrupt from this drive.
+ * That means this could be:
+ * (1) an interrupt from another PCI device
+ * sharing the same PCI INT# as us.
+ * or (2) a drive just entered sleep or standby mode,
+ * and is interrupting to let us know.
+ * or (3) a spurious interrupt of unknown origin.
+ *
+ * For PCI, we cannot tell the difference,
+ * so in that case we just ignore it and hope it goes away.
+ *
+ * FIXME: unexpected_intr should be hwif-> then we can
+ * remove all the ifdef PCI crap
+ */
+#ifdef CONFIG_BLK_DEV_IDEPCI
+ if (hwif->chipset != ide_pci)
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+ {
+ /*
+ * Probably not a shared PCI interrupt,
+ * so we can safely try to do something about it:
+ */
+ unexpected_intr(irq, hwif);
+#ifdef CONFIG_BLK_DEV_IDEPCI
+ } else {
+ /*
+ * Whack the status register, just in case
+ * we have a leftover pending IRQ.
+ */
+ (void)hwif->tp_ops->read_status(hwif);
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+ }
+ goto out;
+ }
+
+ drive = hwif->cur_dev;
+
+ if (!drive_is_ready(drive))
+ /*
+ * This happens regularly when we share a PCI IRQ with
+ * another device. Unfortunately, it can also happen
+ * with some buggy drives that trigger the IRQ before
+ * their status register is up to date. Hopefully we have
+ * enough advance overhead that the latter isn't a problem.
+ */
+ goto out;
+
+ hwif->handler = NULL;
+ hwif->req_gen++;
+ del_timer(&hwif->timer);
+ spin_unlock(&hwif->lock);
+
+ if (hwif->port_ops && hwif->port_ops->clear_irq)
+ hwif->port_ops->clear_irq(drive);
+
+ if (drive->dev_flags & IDE_DFLAG_UNMASK)
+ local_irq_enable_in_hardirq();
+
+ /* service this interrupt, may set handler for next interrupt */
+ startstop = handler(drive);
+
+ spin_lock_irq(&hwif->lock);
+ /*
+ * Note that handler() may have set things up for another
+ * interrupt to occur soon, but it cannot happen until
+ * we exit from this routine, because it will be the
+ * same irq as is currently being serviced here, and Linux
+ * won't allow another of the same (on any CPU) until we return.
+ */
+ if (startstop == ide_stopped) {
+ BUG_ON(hwif->handler);
+ ide_unlock_port(hwif);
+ plug_device = 1;
+ }
+ irq_ret = IRQ_HANDLED;
+out:
+ spin_unlock_irqrestore(&hwif->lock, flags);
+out_early:
+ if (plug_device) {
+ ide_unlock_host(hwif->host);
+ ide_plug_device(drive);
+ }
+
+ return irq_ret;
+}
+EXPORT_SYMBOL_GPL(ide_intr);
+
+/**
+ * ide_do_drive_cmd - issue IDE special command
+ * @drive: device to issue command
+ * @rq: request to issue
+ *
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * the rq is queued at the head of the request queue, displacing
+ * the currently-being-processed request and this function
+ * returns immediately without waiting for the new rq to be
+ * completed. This is VERY DANGEROUS, and is intended for
+ * careful use by the ATAPI tape/cdrom driver code.
+ */
+
+void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+ drive->hwif->rq = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(ide_do_drive_cmd);
+
+void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
+ IDE_TFLAG_OUT_FEATURE | tf_flags;
+ task.tf.feature = dma; /* Use PIO/DMA */
+ task.tf.lbam = bcount & 0xff;
+ task.tf.lbah = (bcount >> 8) & 0xff;
+
+ ide_tf_dump(drive->name, &task.tf);
+ hwif->tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ hwif->tp_ops->tf_load(drive, &task);
+}
+
+EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 buf[4] = { 0 };
+
+ while (len > 0) {
+ if (write)
+ hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
+ else
+ hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
+ len -= 4;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
diff --git a/windhoek/ide/ide-iops.c b/windhoek/ide/ide-iops.c
new file mode 100644
index 00000000..56181a53
--- /dev/null
+++ b/windhoek/ide/ide-iops.c
@@ -0,0 +1,1215 @@
+/*
+ * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+#include <linux/nmi.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+/*
+ * Conventional PIO operations for ATA devices
+ */
+
+static u8 ide_inb (unsigned long port)
+{
+ return (u8) inb(port);
+}
+
+static void ide_outb (u8 val, unsigned long port)
+{
+ outb(val, port);
+}
+
+/*
+ * MMIO operations, typically used for SATA controllers
+ */
+
+static u8 ide_mm_inb (unsigned long port)
+{
+ return (u8) readb((void __iomem *) port);
+}
+
+static void ide_mm_outb (u8 value, unsigned long port)
+{
+ writeb(value, (void __iomem *) port);
+}
+
+void SELECT_DRIVE (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_task_t task;
+
+ if (port_ops && port_ops->selectproc)
+ port_ops->selectproc(drive);
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_DEVICE;
+
+ drive->hwif->tp_ops->tf_load(drive, &task);
+}
+
+void SELECT_MASK(ide_drive_t *drive, int mask)
+{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (port_ops && port_ops->maskproc)
+ port_ops->maskproc(drive, mask);
+}
+
+void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+ else
+ outb(cmd, hwif->io_ports.command_addr);
+}
+EXPORT_SYMBOL_GPL(ide_exec_command);
+
+u8 ide_read_status(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.status_addr);
+ else
+ return inb(hwif->io_ports.status_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_status);
+
+u8 ide_read_altstatus(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ return inb(hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_altstatus);
+
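+/*
+ * Program the device control register: bit 1 (nIEN) masks the drive
+ * interrupt when "on" is zero; the on == 4 hack additionally asserts
+ * bit 2 (SRST) for a soft reset.
+ */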
+void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ outb(ctl, hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_set_irq);
+
+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+ u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+ if (mmio)
+ tf_outb = ide_mm_outb;
+ else
+ tf_outb = ide_outb;
+
+ if (task->tf_flags & IDE_TFLAG_FLAGGED)
+ HIHI = 0xFF;
+
+ if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+ u16 data = (tf->hob_data << 8) | tf->data;
+
+ if (mmio)
+ writew(data, (void __iomem *)io_ports->data_addr);
+ else
+ outw(data, io_ports->data_addr);
+ }
+
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+ tf_outb(tf->hob_feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+ tf_outb(tf->hob_nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+ tf_outb(tf->hob_lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+ tf_outb(tf->hob_lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+ tf_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+ if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+ tf_outb(tf->feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+ tf_outb(tf->nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+ tf_outb(tf->lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+ tf_outb(tf->lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+ tf_outb(tf->lbah, io_ports->lbah_addr);
+
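+	/*
+	 * HIHI masks the device register: 0xE0 for LBA48 (the low nibble is
+	 * reserved there), 0xEF otherwise so the CHS/LBA28 high bits are
+	 * kept, and 0xFF for flagged taskfiles; the drive-select bit is
+	 * always taken from drive->select.
+	 */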
+ if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+ tf_outb((tf->device & HIHI) | drive->select,
+ io_ports->device_addr);
+}
+EXPORT_SYMBOL_GPL(ide_tf_load);
+
+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 (*tf_inb)(unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ if (mmio) {
+ tf_outb = ide_mm_outb;
+ tf_inb = ide_mm_inb;
+ } else {
+ tf_outb = ide_outb;
+ tf_inb = ide_inb;
+ }
+
+ if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+ u16 data;
+
+ if (mmio)
+ data = readw((void __iomem *)io_ports->data_addr);
+ else
+ data = inw(io_ports->data_addr);
+
+ tf->data = data & 0xff;
+ tf->hob_data = (data >> 8) & 0xff;
+ }
+
+ /* be sure we're looking at the low order bits */
+ tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+ tf->nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+ tf->lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+ tf->lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+ tf->lbah = tf_inb(io_ports->lbah_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+ tf->device = tf_inb(io_ports->device_addr);
+
+ if (task->tf_flags & IDE_TFLAG_LBA48) {
+ tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+ tf->hob_feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+ tf->hob_nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+ tf->hob_lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+ tf->hob_lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+ tf->hob_lbah = tf_inb(io_ports->lbah_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_tf_read);
+
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+static void ata_vlb_sync(unsigned long port)
+{
+ (void)inb(port);
+ (void)inb(port);
+ (void)inb(port);
+}
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd len is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	unsigned long data_addr = io_ports->data_addr;
+	u8 io_32bit = drive->io_32bit;
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+	DEBUG_MSG("drive %p, rq %p, buf %p, len %d", drive, rq, buf, len);
+
+	len++;
+
+ DEBUG_MSG("io32bit: %d", io_32bit);
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
+ else
+ insl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ insw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
+ else
+ insw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_input_data);
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long data_addr = io_ports->data_addr;
+ u8 io_32bit = drive->io_32bit;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ len++;
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
+ else
+ outsl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
+ else
+ outsw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_output_data);
+
+u8 ide_read_error(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_FEATURE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.error;
+}
+EXPORT_SYMBOL_GPL(ide_read_error);
+
+void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
+ IDE_TFLAG_IN_NSECT;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ *bcount = (task.tf.lbah << 8) | task.tf.lbam;
+ *ireason = task.tf.nsect & 3;
+}
+EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
+
+const struct ide_tp_ops default_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
+
+void ide_fix_driveid(u16 *id)
+{
+#ifndef __LITTLE_ENDIAN
+# ifdef __BIG_ENDIAN
+ int i;
+
+ for (i = 0; i < 256; i++)
+ id[i] = __le16_to_cpu(id[i]);
+# else
+# error "Please fix <asm/byteorder.h>"
+# endif
+#endif
+}
+
+/*
+ * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
+ * removing leading/trailing blanks and compressing internal blanks.
+ * It is primarily used to tidy up the model name/number fields as
+ * returned by the ATA_CMD_ID_ATA[PI] commands.
+ */
+
+void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
+{
+ u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
+
+ if (byteswap) {
+ /* convert from big-endian to host byte order */
+ for (p = s ; p != end ; p += 2)
+ be16_to_cpus((u16 *) p);
+ }
+
+ /* strip leading blanks */
+ p = s;
+ while (s != end && *s == ' ')
+ ++s;
+ /* compress internal blanks and strip trailing blanks */
+ while (s != end && *s) {
+ if (*s++ != ' ' || (s != end && *s && *s != ' '))
+ *p++ = *(s-1);
+ }
+ /* wipe out trailing garbage */
+ while (p != end)
+ *p++ = '\0';
+}
+
+EXPORT_SYMBOL(ide_fixstring);
+
+/*
+ * Needed for PCI irq sharing
+ */
+int drive_is_ready (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = 0;
+
+ if (drive->waiting_for_dma)
+ return hwif->dma_ops->dma_test_irq(drive);
+
+ /*
+ * We do a passive status test under shared PCI interrupts on
+ * cards that truly share the ATA side interrupt, but may also share
+ * an interrupt with another pci card/device. We make no assumptions
+ * about possible isa-pnp and pci-pnp issues yet.
+ */
+ if (hwif->io_ports.ctl_addr &&
+ (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
+ stat = hwif->tp_ops->read_altstatus(hwif);
+ else
+ /* Note: this may clear a pending IRQ!! */
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY)
+ /* drive busy: definitely not interrupting */
+ return 0;
+
+ /* drive ready: *might* be interrupting */
+ return 1;
+}
+
+EXPORT_SYMBOL(drive_is_ready);
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return error -- caller may then invoke ide_error().
+ *
+ * This routine should be fixed to not hog the CPU during extra-long waits.
+ * That could be done by busy-waiting for the first jiffy or two, and then
+ * setting a timer to wake up at half-second intervals thereafter,
+ * until the timeout is reached.
+ */
+static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ unsigned long flags;
+ int i;
+ u8 stat;
+
+ udelay(1); /* spec allows drive 400ns to assert "BUSY" */
+ stat = tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY) {
+ local_save_flags(flags);
+ local_irq_enable_in_hardirq();
+ timeout += jiffies;
+ while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
+ if (time_after(jiffies, timeout)) {
+ /*
+ * One last read after the timeout in case
+ * heavy interrupt load made us not make any
+ * progress during the timeout..
+ */
+ stat = tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+ local_irq_restore(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+ local_irq_restore(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+ * A few rare drives vastly violate the 400ns spec here,
+ * so we'll wait up to 10usec for a "good" status
+ * rather than expensively fail things immediately.
+ * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ stat = tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, good, bad)) {
+ *rstat = stat;
+ return 0;
+ }
+ }
+ *rstat = stat;
+ return -EFAULT;
+}
+
+/*
+ * In case of error, returns the error value after doing "*startstop = ide_error()".
+ * The caller should then return the updated value of "startstop";
+ * "startstop" is unchanged when the function returns 0.
+ */
+int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
+{
+ int err;
+ u8 stat;
+
+ /* bail early if we've exceeded max_failures */
+ if (drive->max_failures && (drive->failures > drive->max_failures)) {
+ *startstop = ide_stopped;
+ return 1;
+ }
+
+ err = __ide_wait_stat(drive, good, bad, timeout, &stat);
+
+ if (err) {
+ char *s = (err == -EBUSY) ? "status timeout" : "status error";
+ *startstop = ide_error(drive, s, stat);
+ }
+
+ return err;
+}
+
+EXPORT_SYMBOL(ide_wait_stat);
+
+/**
+ * ide_in_drive_list - look for drive in black/white list
+ * @id: drive identifier
+ * @table: list to inspect
+ *
+ * Look for a drive in the blacklist and the whitelist tables.
+ * Returns 1 if the drive is found in the table.
+ */
+
+int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
+{
+ for ( ; table->id_model; table++)
+ if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
+ (!table->id_firmware ||
+ strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
+ return 1;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ide_in_drive_list);
+
+/*
+ * Early UDMA66 devices don't set bit 14 to 1; only bit 13 is valid.
+ * We list them here and depend on the device-side cable detection for them.
+ *
+ * Some optical devices with buggy firmware have the same problem.
+ */
+static const struct drive_list_entry ivb_list[] = {
+ { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
+ { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
+ { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
+ { "SAMSUNG SP0822N" , "WA100-10" },
+ { NULL , NULL }
+};
+
+/*
+ * Any host that uses the 80-conductor ribbon must use this check.
+ * The name is derived from the upper byte of word 93 and the 80c ribbon.
+ */
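+/*
+ * In short, the cases treated as 80-wire capable below are: a short
+ * 40-pin cable host, a SATA device not on ivb_list, or an 80c host cable
+ * with word 93 bit 14 set (bit 13 for ivb_list devices).  Everything
+ * else is limited to UDMA33 and warned about once per drive.
+ */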
+u8 eighty_ninty_three (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ int ivb = ide_in_drive_list(id, ivb_list);
+
+ if (hwif->cbl == ATA_CBL_PATA40_SHORT)
+ return 1;
+
+ if (ivb)
+ printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
+ drive->name);
+
+ if (ata_id_is_sata(id) && !ivb)
+ return 1;
+
+ if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
+ goto no_80w;
+
+ /*
+ * FIXME:
+ * - change master/slave IDENTIFY order
+ * - force bit13 (80c cable present) check also for !ivb devices
+ * (unless the slave device is pre-ATA3)
+ */
+ if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
+ (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
+ return 1;
+
+no_80w:
+ if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
+ return 0;
+
+ printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
+ "limiting max speed to UDMA33\n",
+ drive->name,
+ hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
+
+ drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;
+
+ return 0;
+}
+
+int ide_driveid_update(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u16 *id;
+ unsigned long flags;
+ u8 stat;
+
+ /*
+ * Re-read drive->id for possible DMA mode
+ * change (copied from ide-probe.c)
+ */
+
+ SELECT_MASK(drive, 1);
+ tp_ops->set_irq(hwif, 0);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_CMD_ID_ATA);
+
+ if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) {
+ SELECT_MASK(drive, 0);
+ return 0;
+ }
+
+ msleep(50); /* wait for IRQ and ATA_DRQ */
+ stat = tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) {
+ SELECT_MASK(drive, 0);
+ printk("%s: CHECK for good STATUS\n", drive->name);
+ return 0;
+ }
+ local_irq_save(flags);
+ SELECT_MASK(drive, 0);
+ id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
+ if (!id) {
+ local_irq_restore(flags);
+ return 0;
+ }
+ tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+ (void)tp_ops->read_status(hwif); /* clear drive IRQ */
+ local_irq_enable();
+ local_irq_restore(flags);
+ ide_fix_driveid(id);
+
+ drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
+ drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
+ drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
+ /* anything more ? */
+
+ kfree(id);
+
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
+ ide_dma_off(drive);
+
+ return 1;
+}
+
+int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u16 *id = drive->id, i;
+ int error = 0;
+ u8 stat;
+ ide_task_t task;
+
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ if (hwif->dma_ops) /* check if host supports DMA */
+ hwif->dma_ops->dma_host_set(drive, 0);
+#endif
+
+ /* Skip setting PIO flow-control modes on pre-EIDE drives */
+ if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
+ goto skip;
+
+ /*
+ * Don't use ide_wait_cmd here - it will
+ * attempt to set_geometry and recalibrate,
+ * but for some reason these don't work at
+ * this point (lost interrupt).
+ */
+ /*
+ * Select the drive, and issue the SETFEATURES command
+ */
+ disable_irq_nosync(hwif->irq);
+
+ /*
+ * FIXME: we race against the running IRQ here if
+ * this is called from non IRQ context. If we use
+ * disable_irq() we hang on the error path. Work
+ * is needed.
+ */
+
+ udelay(1);
+ SELECT_DRIVE(drive);
+ SELECT_MASK(drive, 1);
+ udelay(1);
+ tp_ops->set_irq(hwif, 0);
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
+ task.tf.feature = SETFEATURES_XFER;
+ task.tf.nsect = speed;
+
+ tp_ops->tf_load(drive, &task);
+
+ tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
+
+ if (drive->quirk_list == 2)
+ tp_ops->set_irq(hwif, 1);
+
+ error = __ide_wait_stat(drive, drive->ready_stat,
+ ATA_BUSY | ATA_DRQ | ATA_ERR,
+ WAIT_CMD, &stat);
+
+ SELECT_MASK(drive, 0);
+
+ enable_irq(hwif->irq);
+
+ if (error) {
+ (void) ide_dump_status(drive, "set_drive_speed_status", stat);
+ return error;
+ }
+
+ id[ATA_ID_UDMA_MODES] &= ~0xFF00;
+ id[ATA_ID_MWDMA_MODES] &= ~0x0F00;
+ id[ATA_ID_SWDMA_MODES] &= ~0x0F00;
+
+ skip:
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
+ hwif->dma_ops->dma_host_set(drive, 1);
+ else if (hwif->dma_ops) /* check if host supports DMA */
+ ide_dma_off_quietly(drive);
+#endif
+
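+	/*
+	 * Record the newly selected mode in the mode words: the low byte
+	 * holds the "supported" bits, the high byte the "selected" bit.
+	 * E.g. speed == XFER_UDMA_5 gives i = 0x20 and ORs 0x2020 into
+	 * id[ATA_ID_UDMA_MODES].
+	 */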
+ if (speed >= XFER_UDMA_0) {
+ i = 1 << (speed - XFER_UDMA_0);
+ id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
+ } else if (speed >= XFER_MW_DMA_0) {
+ i = 1 << (speed - XFER_MW_DMA_0);
+ id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
+ } else if (speed >= XFER_SW_DMA_0) {
+ i = 1 << (speed - XFER_SW_DMA_0);
+ id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
+ }
+
+ if (!drive->init_speed)
+ drive->init_speed = speed;
+ drive->current_speed = speed;
+ return error;
+}
+
+/*
+ * This should get invoked any time we exit the driver to
+ * wait for an interrupt response from a drive. handler() points
+ * at the appropriate code to handle the next interrupt, and a
+ * timer is started to prevent us from waiting forever in case
+ * something goes wrong (see the ide_timer_expiry() handler later on).
+ *
+ * See also ide_execute_command
+ */
+static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
+ unsigned int timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ BUG_ON(hwif->handler);
+ hwif->handler = handler;
+ hwif->expiry = expiry;
+ hwif->timer.expires = jiffies + timeout;
+ hwif->req_gen_timer = hwif->req_gen;
+ add_timer(&hwif->timer);
+}
+
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
+ unsigned int timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ __ide_set_handler(drive, handler, timeout, expiry);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+
+EXPORT_SYMBOL(ide_set_handler);
+
+/**
+ * ide_execute_command - execute an IDE command
+ * @drive: IDE drive to issue the command against
+ * @command: command byte to write
+ * @handler: handler for next phase
+ * @timeout: timeout for command
+ * @expiry: handler to run on timeout
+ *
+ * Helper function to issue an IDE command. This handles the
+ * atomicity requirements, command timing and ensures that the
+ * handler and IRQ setup do not race. All IDE command kick off
+ * should go via this function or do equivalent locking.
+ */
+
+void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
+ unsigned timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ __ide_set_handler(drive, handler, timeout, expiry);
+ hwif->tp_ops->exec_command(hwif, cmd);
+ /*
+	 * The drive takes 400ns to respond; we must avoid the IRQ being
+	 * serviced before that.
+ *
+ * FIXME: we could skip this delay with care on non shared devices
+ */
+ ndelay(400);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+EXPORT_SYMBOL(ide_execute_command);
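+
+/*
+ * A typical kick-off, sketched (the handler name is illustrative only,
+ * not something defined in this file):
+ *
+ *	ide_execute_command(drive, ATA_CMD_ID_ATA, &drive_intr_handler,
+ *			    WAIT_WORSTCASE, NULL);
+ *
+ * drive_intr_handler() then runs from the port interrupt, or
+ * ide_timer_expiry() steps in if no interrupt arrives in time.
+ */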
+
+void ide_execute_pkt_cmd(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
+ ndelay(400);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
+
+static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+{
+ struct request *rq = drive->hwif->rq;
+
+ if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
+ ide_end_request(drive, err ? err : 1, 0);
+}
+
+/* needed below */
+static ide_startstop_t do_reset1 (ide_drive_t *, int);
+
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat;
+
+ SELECT_DRIVE(drive);
+ udelay (10);
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, 0, ATA_BUSY))
+ printk("%s: ATAPI reset complete\n", drive->name);
+ else {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ /* end of polling */
+ hwif->polling = 0;
+ printk("%s: ATAPI reset timed-out, status=0x%02x\n",
+ drive->name, stat);
+ /* do it the old fashioned way */
+ return do_reset1(drive, 1);
+ }
+ /* done polling */
+ hwif->polling = 0;
+ ide_complete_drive_reset(drive, 0);
+ return ide_stopped;
+}
+
+static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
+{
+ static const char *err_master_vals[] =
+ { NULL, "passed", "formatter device error",
+ "sector buffer error", "ECC circuitry error",
+ "controlling MPU error" };
+
+ u8 err_master = err & 0x7f;
+
+ printk(KERN_ERR "%s: reset: master: ", hwif->name);
+ if (err_master && err_master < 6)
+ printk(KERN_CONT "%s", err_master_vals[err_master]);
+ else
+ printk(KERN_CONT "error (0x%02x?)", err);
+ if (err & 0x80)
+ printk(KERN_CONT "; slave: failed");
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 tmp;
+ int err = 0;
+
+ if (port_ops && port_ops->reset_poll) {
+ err = port_ops->reset_poll(drive);
+ if (err) {
+ printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
+ hwif->name, drive->name);
+ goto out;
+ }
+ }
+
+ tmp = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(tmp, 0, ATA_BUSY)) {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
+ drive->failures++;
+ err = -EIO;
+ } else {
+ tmp = ide_read_error(drive);
+
+ if (tmp == 1) {
+ printk(KERN_INFO "%s: reset: success\n", hwif->name);
+ drive->failures = 0;
+ } else {
+ ide_reset_report_error(hwif, tmp);
+ drive->failures++;
+ err = -EIO;
+ }
+ }
+out:
+ hwif->polling = 0; /* done polling */
+ ide_complete_drive_reset(drive, err);
+ return ide_stopped;
+}
+
+static void ide_disk_pre_reset(ide_drive_t *drive)
+{
+ int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
+
+ drive->special.all = 0;
+ drive->special.b.set_geometry = legacy;
+ drive->special.b.recalibrate = legacy;
+
+ drive->mult_count = 0;
+ drive->dev_flags &= ~IDE_DFLAG_PARKED;
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
+ (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
+ drive->mult_req = 0;
+
+ if (drive->mult_req != drive->mult_count)
+ drive->special.b.set_multmode = 1;
+}
+
+static void pre_reset(ide_drive_t *drive)
+{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (drive->media == ide_disk)
+ ide_disk_pre_reset(drive);
+ else
+ drive->dev_flags |= IDE_DFLAG_POST_RESET;
+
+ if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
+ if (drive->crc_count)
+ ide_check_dma_crc(drive);
+ else
+ ide_dma_off(drive);
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+ drive->io_32bit = 0;
+ }
+ return;
+ }
+
+ if (port_ops && port_ops->pre_reset)
+ port_ops->pre_reset(drive);
+
+ if (drive->current_speed != 0xff)
+ drive->desired_speed = drive->current_speed;
+ drive->current_speed = 0xff;
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time
+ * to complete (up to 30 seconds worst-case). So, instead of busy-waiting here,
+ * we set a timer to poll at 50ms intervals.
+ */
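+/*
+ * Rough flow: for an ATAPI device (unless do_not_try_atapi) issue
+ * ATA_CMD_DEV_RESET and poll via atapi_reset_pollfunc(); otherwise wait
+ * out any parked devices, reset per-drive state, pulse SRST through the
+ * control register and poll via reset_pollfunc() every 50ms until
+ * WAIT_WORSTCASE expires.
+ */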
+static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ const struct ide_port_ops *port_ops;
+ ide_drive_t *tdrive;
+ unsigned long flags, timeout;
+ int i;
+ DEFINE_WAIT(wait);
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ /* We must not reset with running handlers */
+ BUG_ON(hwif->handler != NULL);
+
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk && !do_not_try_atapi) {
+ pre_reset(drive);
+ SELECT_DRIVE(drive);
+ udelay (20);
+ tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
+ ndelay(400);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+ }
+
+ /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
+ do {
+ unsigned long now;
+
+ prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
+ timeout = jiffies;
+ ide_port_for_each_dev(i, tdrive, hwif) {
+ if (tdrive->dev_flags & IDE_DFLAG_PRESENT &&
+ tdrive->dev_flags & IDE_DFLAG_PARKED &&
+ time_after(tdrive->sleep, timeout))
+ timeout = tdrive->sleep;
+ }
+
+ now = jiffies;
+ if (time_before_eq(timeout, now))
+ break;
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ timeout = schedule_timeout_uninterruptible(timeout - now);
+ spin_lock_irqsave(&hwif->lock, flags);
+ } while (timeout);
+ finish_wait(&ide_park_wq, &wait);
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ ide_port_for_each_dev(i, tdrive, hwif)
+ pre_reset(tdrive);
+
+ if (io_ports->ctl_addr == 0) {
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ ide_complete_drive_reset(drive, -ENXIO);
+ return ide_stopped;
+ }
+
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ *
+ * TODO: add ->softreset method and stop abusing ->set_irq
+ */
+ /* set SRST and nIEN */
+ tp_ops->set_irq(hwif, 4);
+ /* more than enough time */
+ udelay(10);
+ /* clear SRST, leave nIEN (unless device is on the quirk list) */
+ tp_ops->set_irq(hwif, drive->quirk_list == 2);
+ /* more than enough time */
+ udelay(10);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+
+ /*
+	 * Some weird controllers like resetting themselves to a strange
+	 * state when the disks are reset this way. At least, the Winbond
+	 * 553 documentation says so.
+ */
+ port_ops = hwif->port_ops;
+ if (port_ops && port_ops->resetproc)
+ port_ops->resetproc(drive);
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ */
+
+ide_startstop_t ide_do_reset (ide_drive_t *drive)
+{
+ return do_reset1(drive, 0);
+}
+
+EXPORT_SYMBOL(ide_do_reset);
+
+/*
+ * ide_wait_not_busy() waits for the currently selected device on the hwif
+ * to report a non-busy status, see comments in ide_probe_port().
+ */
+int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
+{
+ u8 stat = 0;
+
+ while(timeout--) {
+ /*
+ * Turn this into a schedule() sleep once I'm sure
+ * about locking issues (2.5 work ?).
+ */
+ mdelay(1);
+ stat = hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ return 0;
+ /*
+ * Assume a value of 0xff means nothing is connected to
+ * the interface and it doesn't implement the pull-down
+ * resistor on D7.
+ */
+ if (stat == 0xff)
+ return -ENODEV;
+ touch_softlockup_watchdog();
+ touch_nmi_watchdog();
+ }
+ return -EBUSY;
+}
diff --git a/windhoek/ide/ide-lib.c b/windhoek/ide/ide-lib.c
new file mode 100644
index 00000000..09526a0d
--- /dev/null
+++ b/windhoek/ide/ide-lib.c
@@ -0,0 +1,423 @@
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+
+static const char *udma_str[] =
+ { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
+ "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
+static const char *mwdma_str[] =
+ { "MWDMA0", "MWDMA1", "MWDMA2" };
+static const char *swdma_str[] =
+ { "SWDMA0", "SWDMA1", "SWDMA2" };
+static const char *pio_str[] =
+ { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" };
+
+/**
+ * ide_xfer_verbose - return IDE mode names
+ * @mode: transfer mode
+ *
+ * Returns a constant string giving the name of the mode
+ * requested.
+ */
+
+const char *ide_xfer_verbose(u8 mode)
+{
+ const char *s;
+ u8 i = mode & 0xf;
+
+ if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ s = udma_str[i];
+ else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2)
+ s = mwdma_str[i];
+ else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
+ s = swdma_str[i];
+ else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5)
+ s = pio_str[i & 0x7];
+ else if (mode == XFER_PIO_SLOW)
+ s = "PIO SLOW";
+ else
+ s = "XFER ERROR";
+
+ return s;
+}
+EXPORT_SYMBOL(ide_xfer_verbose);
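+
+/* For example, XFER_UDMA_5 (0x45) maps to udma_str[5], i.e. "UDMA/100". */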
+
+/**
+ * ide_rate_filter - filter transfer mode
+ * @drive: IDE device
+ * @speed: desired speed
+ *
+ * Given the available transfer modes this function returns
+ * the best available speed at or below the speed requested.
+ *
+ * TODO: check device PIO capabilities
+ */
+
+static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mode = ide_find_dma_mode(drive, speed);
+
+ if (mode == 0) {
+ if (hwif->pio_mask)
+ mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
+ else
+ mode = XFER_PIO_4;
+ }
+
+/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
+
+ return min(speed, mode);
+}
+
+/**
+ * ide_get_best_pio_mode - get PIO mode from drive
+ * @drive: drive to consider
+ * @mode_wanted: preferred mode
+ * @max_mode: highest allowed mode
+ *
+ * This routine returns the recommended PIO settings for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ *
+ * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
+ * This is used by most chipset support modules when "auto-tuning".
+ */
+
+u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
+{
+ u16 *id = drive->id;
+ int pio_mode = -1, overridden = 0;
+
+ if (mode_wanted != 255)
+ return min_t(u8, mode_wanted, max_mode);
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
+ pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
+
+ if (pio_mode != -1) {
+ printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
+ } else {
+ pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
+ if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
+ pio_mode = 2;
+ overridden = 1;
+ }
+
+ if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
+ if (ata_id_has_iordy(id)) {
+ if (id[ATA_ID_PIO_MODES] & 7) {
+ overridden = 0;
+ if (id[ATA_ID_PIO_MODES] & 4)
+ pio_mode = 5;
+ else if (id[ATA_ID_PIO_MODES] & 2)
+ pio_mode = 4;
+ else
+ pio_mode = 3;
+ }
+ }
+ }
+
+ if (overridden)
+ printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
+ drive->name);
+ }
+
+ if (pio_mode > max_mode)
+ pio_mode = max_mode;
+
+ return pio_mode;
+}
+EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
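+
+/*
+ * Example of the decoding above: a non-blacklisted drive reporting
+ * tPIO = 2 in ATA_ID_OLD_PIO_MODES and advanced PIO bits 0-1 set in
+ * ATA_ID_PIO_MODES (value 0x0003) ends up with pio_mode = 4 (the "& 4"
+ * case would select PIO5).
+ */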
+
+/* req_pio == "255" for auto-tune */
+void ide_set_pio(ide_drive_t *drive, u8 req_pio)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 host_pio, pio;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return;
+
+ BUG_ON(hwif->pio_mask == 0x00);
+
+ host_pio = fls(hwif->pio_mask) - 1;
+
+ pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
+
+ /*
+ * TODO:
+ * - report device max PIO mode
+ * - check req_pio != 255 against device max PIO mode
+ */
+ printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
+ drive->name, host_pio, req_pio,
+ req_pio == 255 ? "(auto-tune)" : "", pio);
+
+ (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
+}
+EXPORT_SYMBOL_GPL(ide_set_pio);
+
+/**
+ * ide_toggle_bounce - handle bounce buffering
+ * @drive: drive to update
+ * @on: on/off boolean
+ *
+ * Enable or disable bounce buffering for the device. Drives move
+ * between PIO and DMA and that changes the rules we need.
+ */
+
+void ide_toggle_bounce(ide_drive_t *drive, int on)
+{
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
+ if (!PCI_DMA_BUS_IS_PHYS) {
+ addr = BLK_BOUNCE_ANY;
+ } else if (on && drive->media == ide_disk) {
+ struct device *dev = drive->hwif->dev;
+
+ if (dev && dev->dma_mask)
+ addr = *dev->dma_mask;
+ }
+
+ if (drive->queue)
+ blk_queue_bounce_limit(drive->queue, addr);
+}
+
+int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL)
+ return -1;
+
+ /*
+ * TODO: temporary hack for some legacy host drivers that didn't
+ * set transfer mode on the device in ->set_pio_mode method...
+ */
+ if (port_ops->set_dma_mode == NULL) {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ }
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ } else {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+
+int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL)
+ return -1;
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_dma_mode(drive, mode);
+ return 0;
+ } else {
+ port_ops->set_dma_mode(drive, mode);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_set_dma_mode);
+
+/**
+ * ide_set_xfer_rate - set transfer rate
+ * @drive: drive to set
+ * @rate: speed to attempt to set
+ *
+ * General helper for setting the speed of an IDE device. This
+ * function knows about user enforced limits from the configuration
+ * which ->set_pio_mode/->set_dma_mode does not.
+ */
+
+int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -1;
+
+ rate = ide_rate_filter(drive, rate);
+
+ BUG_ON(rate < XFER_PIO_0);
+
+ if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
+ return ide_set_pio_mode(drive, rate);
+
+ return ide_set_dma_mode(drive, rate);
+}
+
+static void ide_dump_opcode(ide_drive_t *drive)
+{
+ struct request *rq = drive->hwif->rq;
+ ide_task_t *task = NULL;
+
+ if (!rq)
+ return;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ task = rq->special;
+
+ printk(KERN_ERR "ide: failed opcode was: ");
+ if (task == NULL)
+ printk(KERN_CONT "unknown\n");
+ else
+ printk(KERN_CONT "0x%02x\n", task->tf.command);
+}
+
+u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48)
+{
+ u32 high, low;
+
+ if (lba48)
+ high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) |
+ tf->hob_lbal;
+ else
+ high = tf->device & 0xf;
+ low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
+
+ return ((u64)high << 24) | low;
+}
+EXPORT_SYMBOL_GPL(ide_get_lba_addr);
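+
+/*
+ * Worked example for the LBA28 case above: device = 0xE5, lbah = 0x12,
+ * lbam = 0x34, lbal = 0x56 gives high = 0x5, low = 0x123456 and a
+ * returned address of 0x05123456.
+ */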
+
+static void ide_dump_sector(ide_drive_t *drive)
+{
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+ u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
+
+ memset(&task, 0, sizeof(task));
+ if (lba48)
+ task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA |
+ IDE_TFLAG_LBA48;
+ else
+ task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ if (lba48 || (tf->device & ATA_LBA))
+ printk(KERN_CONT ", LBAsect=%llu",
+ (unsigned long long)ide_get_lba_addr(tf, lba48));
+ else
+ printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
+ tf->device & 0xf, tf->lbal);
+}
+
+static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
+{
+ printk(KERN_ERR "{ ");
+ if (err & ATA_ABORTED)
+ printk(KERN_CONT "DriveStatusError ");
+ if (err & ATA_ICRC)
+ printk(KERN_CONT "%s",
+ (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
+ if (err & ATA_UNC)
+ printk(KERN_CONT "UncorrectableError ");
+ if (err & ATA_IDNF)
+ printk(KERN_CONT "SectorIdNotFound ");
+ if (err & ATA_TRK0NF)
+ printk(KERN_CONT "TrackZeroNotFound ");
+ if (err & ATA_AMNF)
+ printk(KERN_CONT "AddrMarkNotFound ");
+ printk(KERN_CONT "}");
+ if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
+ (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
+ struct request *rq = drive->hwif->rq;
+
+ ide_dump_sector(drive);
+
+ if (rq)
+ printk(KERN_CONT ", sector=%llu",
+ (unsigned long long)rq->sector);
+ }
+ printk(KERN_CONT "\n");
+}
+
+static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
+{
+ printk(KERN_ERR "{ ");
+ if (err & ATAPI_ILI)
+ printk(KERN_CONT "IllegalLengthIndication ");
+ if (err & ATAPI_EOM)
+ printk(KERN_CONT "EndOfMedia ");
+ if (err & ATA_ABORTED)
+ printk(KERN_CONT "AbortedCommand ");
+ if (err & ATA_MCR)
+ printk(KERN_CONT "MediaChangeRequested ");
+ if (err & ATAPI_LFS)
+ printk(KERN_CONT "LastFailedSense=0x%02x ",
+ (err & ATAPI_LFS) >> 4);
+ printk(KERN_CONT "}\n");
+}
+
+/**
+ * ide_dump_status - translate ATA/ATAPI error
+ * @drive: drive that status applies to
+ * @msg: text message to print
+ * @stat: status byte to decode
+ *
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ * Combines the drive name, message and status byte to provide a
+ * user understandable explanation of the device error.
+ */
+
+u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
+{
+ u8 err = 0;
+
+ printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
+ if (stat & ATA_BUSY)
+ printk(KERN_CONT "Busy ");
+ else {
+ if (stat & ATA_DRDY)
+ printk(KERN_CONT "DriveReady ");
+ if (stat & ATA_DF)
+ printk(KERN_CONT "DeviceFault ");
+ if (stat & ATA_DSC)
+ printk(KERN_CONT "SeekComplete ");
+ if (stat & ATA_DRQ)
+ printk(KERN_CONT "DataRequest ");
+ if (stat & ATA_CORR)
+ printk(KERN_CONT "CorrectedError ");
+ if (stat & ATA_IDX)
+ printk(KERN_CONT "Index ");
+ if (stat & ATA_ERR)
+ printk(KERN_CONT "Error ");
+ }
+ printk(KERN_CONT "}\n");
+ if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
+ err = ide_read_error(drive);
+ printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
+ if (drive->media == ide_disk)
+ ide_dump_ata_error(drive, err);
+ else
+ ide_dump_atapi_error(drive, err);
+ }
+ ide_dump_opcode(drive);
+ return err;
+}
+EXPORT_SYMBOL(ide_dump_status);
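+
+/*
+ * A typical disk failure, assuming status 0x51 and error 0x04, is
+ * reported as:
+ *
+ *	hdX: <msg>: status=0x51 { DriveReady SeekComplete Error }
+ *	hdX: <msg>: error=0x04 { DriveStatusError }
+ *	ide: failed opcode was: unknown
+ */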
diff --git a/windhoek/ide/ide-park.c b/windhoek/ide/ide-park.c
new file mode 100644
index 00000000..c875a957
--- /dev/null
+++ b/windhoek/ide/ide-park.c
@@ -0,0 +1,124 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/jiffies.h>
+#include <linux/blkdev.h>
+
+DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
+
+static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request_queue *q = drive->queue;
+ struct request *rq;
+ int rc;
+
+ timeout += jiffies;
+ spin_lock_irq(&hwif->lock);
+ if (drive->dev_flags & IDE_DFLAG_PARKED) {
+ int reset_timer = time_before(timeout, drive->sleep);
+ int start_queue = 0;
+
+ drive->sleep = timeout;
+ wake_up_all(&ide_park_wq);
+ if (reset_timer && del_timer(&hwif->timer))
+ start_queue = 1;
+ spin_unlock_irq(&hwif->lock);
+
+ if (start_queue) {
+ spin_lock_irq(q->queue_lock);
+ blk_start_queueing(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+ return;
+ }
+ spin_unlock_irq(&hwif->lock);
+
+ rq = blk_get_request(q, READ, __GFP_WAIT);
+ rq->cmd[0] = REQ_PARK_HEADS;
+ rq->cmd_len = 1;
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ rq->special = &timeout;
+ rc = blk_execute_rq(q, NULL, rq, 1);
+ blk_put_request(rq);
+ if (rc)
+ goto out;
+
+ /*
+ * Make sure that *some* command is sent to the drive after the
+ * timeout has expired, so power management will be reenabled.
+ */
+ rq = blk_get_request(q, READ, GFP_NOWAIT);
+ if (unlikely(!rq))
+ goto out;
+
+ rq->cmd[0] = REQ_UNPARK_HEADS;
+ rq->cmd_len = 1;
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+
+out:
+ return;
+}
+
+ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long now;
+ unsigned int msecs;
+
+ if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
+ return -EOPNOTSUPP;
+
+ spin_lock_irq(&hwif->lock);
+ now = jiffies;
+ if (drive->dev_flags & IDE_DFLAG_PARKED &&
+ time_after(drive->sleep, now))
+ msecs = jiffies_to_msecs(drive->sleep - now);
+ else
+ msecs = 0;
+ spin_unlock_irq(&hwif->lock);
+
+ return snprintf(buf, 20, "%u\n", msecs);
+}
+
+ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+#define MAX_PARK_TIMEOUT 30000
+ ide_drive_t *drive = to_ide_device(dev);
+ long int input;
+ int rc;
+
+ rc = strict_strtol(buf, 10, &input);
+ if (rc || input < -2)
+ return -EINVAL;
+ if (input > MAX_PARK_TIMEOUT) {
+ input = MAX_PARK_TIMEOUT;
+ rc = -EOVERFLOW;
+ }
+
+ mutex_lock(&ide_setting_mtx);
+ if (input >= 0) {
+ if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
+ rc = -EOPNOTSUPP;
+ else if (input || drive->dev_flags & IDE_DFLAG_PARKED)
+ issue_park_cmd(drive, msecs_to_jiffies(input));
+ } else {
+ if (drive->media == ide_disk)
+ switch (input) {
+ case -1:
+ drive->dev_flags &= ~IDE_DFLAG_NO_UNLOAD;
+ break;
+ case -2:
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+ break;
+ }
+ else
+ rc = -EOPNOTSUPP;
+ }
+ mutex_unlock(&ide_setting_mtx);
+
+ return rc ? rc : len;
+}
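+
+/*
+ * These handlers back a sysfs attribute (wired up elsewhere, typically
+ * exposed as .../unload_heads): writing a value in milliseconds (capped
+ * at MAX_PARK_TIMEOUT) parks the heads for that long, writing 0 ends an
+ * active park, and -1/-2 clear/set IDE_DFLAG_NO_UNLOAD on disks; reading
+ * returns the remaining park time in milliseconds.
+ */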
diff --git a/windhoek/ide/ide-pci-generic.c b/windhoek/ide/ide-pci-generic.c
new file mode 100644
index 00000000..f47b9326
--- /dev/null
+++ b/windhoek/ide/ide-pci-generic.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
+ * Portions (C) Copyright 2002 Red Hat Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#define DRV_NAME "ide_pci_generic"
+
+#include "local.h"
+
+static int ide_generic_all; /* Set to claim all devices */
+
+module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
+MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
+
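+/*
+ * Usage note: loading with all_generic_ide=1 (or the equivalent kernel
+ * command-line parameter; the exact name depends on how this tree builds
+ * the module) makes the catch-all PCI_CLASS_STORAGE_IDE entry at the end
+ * of generic_pci_tbl[] claim otherwise unknown controllers.
+ */
+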
+#define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
+
+#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
+ { \
+ .name = DRV_NAME, \
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
+ extra_flags, \
+ .swdma_mask = ATA_SWDMA2, \
+ .mwdma_mask = ATA_MWDMA2, \
+ .udma_mask = ATA_UDMA6, \
+ }
+
+static const struct ide_port_info generic_chipsets[] __devinitdata = {
+ /* 0: Unknown */
+ DECLARE_GENERIC_PCI_DEV(0),
+
+ { /* 1: NS87410 */
+ .name = DRV_NAME,
+ .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ },
+
+ /* 2: SAMURAI / HT6565 / HINT_IDE */
+ DECLARE_GENERIC_PCI_DEV(0),
+ /* 3: UM8673F / UM8886A / UM8886BF */
+ DECLARE_GENERIC_PCI_DEV(IDE_HFLAGS_UMC),
+ /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
+ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
+
+ { /* 5: VIA8237SATA */
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
+ IDE_HFLAG_OFF_BOARD,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ },
+
+ { /* 6: Revolution */
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
+ IDE_HFLAG_TRUST_BIOS_FOR_DMA |
+ IDE_HFLAG_OFF_BOARD,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ }
+};
+
+/**
+ * generic_init_one - called when a matching PCI IDE device is found
+ * @dev: the generic device
+ * @id: the matching pci id
+ *
+ * Called when the PCI registration layer (or the IDE initialization)
+ * finds a device matching our IDE device tables.
+ */
+
+static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ const struct ide_port_info *d = &generic_chipsets[id->driver_data];
+ int ret = -ENODEV;
+
+ /* Don't use the generic entry unless instructed to do so */
+ if (id->driver_data == 0 && ide_generic_all == 0)
+ goto out;
+
+ switch (dev->vendor) {
+ case PCI_VENDOR_ID_UMC:
+ if (dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+ !(PCI_FUNC(dev->devfn) & 1))
+ goto out; /* UM8886A/BF pair */
+ break;
+ case PCI_VENDOR_ID_OPTI:
+ if (dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
+ !(PCI_FUNC(dev->devfn) & 1))
+ goto out;
+ break;
+ case PCI_VENDOR_ID_JMICRON:
+ if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 &&
+ PCI_FUNC(dev->devfn) != 1)
+ goto out;
+ break;
+ case PCI_VENDOR_ID_NS:
+ if (dev->device == PCI_DEVICE_ID_NS_87410 &&
+ (dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ goto out;
+ break;
+ }
+
+ if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
+ u16 command;
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(KERN_INFO "%s %s: skipping disabled "
+ "controller\n", d->name, pci_name(dev));
+ goto out;
+ }
+ }
+ ret = ide_pci_init_one(dev, d, NULL);
+out:
+ return ret;
+}
+
+static const struct pci_device_id generic_pci_tbl[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 },
+ { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 },
+ { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 },
+ { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 },
+ { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 },
+#ifdef CONFIG_BLK_DEV_IDE_SATA
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 },
+#endif
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO), 4 },
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 },
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 },
+ { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 },
+ /*
+ * Must come last. If you add entries adjust
+ * this table and generic_chipsets[] appropriately.
+ */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
+
+static struct pci_driver generic_pci_driver = {
+ .name = "PCI_IDE",
+ .id_table = generic_pci_tbl,
+ .probe = generic_init_one,
+ .remove = ide_pci_remove,
+ .suspend = ide_pci_suspend,
+ .resume = ide_pci_resume,
+};
+
+static int __init generic_ide_init(void)
+{
+ return ide_pci_register_driver(&generic_pci_driver);
+}
+
+static void __exit generic_ide_exit(void)
+{
+ pci_unregister_driver(&generic_pci_driver);
+}
+
+module_init(generic_ide_init);
+module_exit(generic_ide_exit);
+
+MODULE_AUTHOR("Andre Hedrick");
+MODULE_DESCRIPTION("PCI driver module for generic PCI IDE");
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-pio-blacklist.c b/windhoek/ide/ide-pio-blacklist.c
new file mode 100644
index 00000000..a8c2c8f8
--- /dev/null
+++ b/windhoek/ide/ide-pio-blacklist.c
@@ -0,0 +1,94 @@
+/*
+ * PIO blacklist. Some drives incorrectly report their maximum PIO mode,
+ * at least with respect to CMD640. Here we keep info on some known drives.
+ *
+ * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION
+ * to avoid breaking the fragile cmd640.c support.
+ */
+
+#include <linux/string.h>
+
+static struct ide_pio_info {
+ const char *name;
+ int pio;
+} ide_pio_blacklist [] = {
+ { "Conner Peripherals 540MB - CFS540A", 3 },
+
+ { "WDC AC2700", 3 },
+ { "WDC AC2540", 3 },
+ { "WDC AC2420", 3 },
+ { "WDC AC2340", 3 },
+ { "WDC AC2250", 0 },
+ { "WDC AC2200", 0 },
+ { "WDC AC21200", 4 },
+ { "WDC AC2120", 0 },
+ { "WDC AC2850", 3 },
+ { "WDC AC1270", 3 },
+ { "WDC AC1170", 1 },
+ { "WDC AC1210", 1 },
+ { "WDC AC280", 0 },
+ { "WDC AC31000", 3 },
+ { "WDC AC31200", 3 },
+
+ { "Maxtor 7131 AT", 1 },
+ { "Maxtor 7171 AT", 1 },
+ { "Maxtor 7213 AT", 1 },
+ { "Maxtor 7245 AT", 1 },
+ { "Maxtor 7345 AT", 1 },
+ { "Maxtor 7546 AT", 3 },
+ { "Maxtor 7540 AV", 3 },
+
+ { "SAMSUNG SHD-3121A", 1 },
+ { "SAMSUNG SHD-3122A", 1 },
+ { "SAMSUNG SHD-3172A", 1 },
+
+ { "ST5660A", 3 },
+ { "ST3660A", 3 },
+ { "ST3630A", 3 },
+ { "ST3655A", 3 },
+ { "ST3391A", 3 },
+ { "ST3390A", 1 },
+ { "ST3600A", 1 },
+ { "ST3290A", 0 },
+ { "ST3144A", 0 },
+ { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive)
+ according to Seagate's FIND-ATA program */
+
+ { "QUANTUM ELS127A", 0 },
+ { "QUANTUM ELS170A", 0 },
+ { "QUANTUM LPS240A", 0 },
+ { "QUANTUM LPS210A", 3 },
+ { "QUANTUM LPS270A", 3 },
+ { "QUANTUM LPS365A", 3 },
+ { "QUANTUM LPS540A", 3 },
+ { "QUANTUM LIGHTNING 540A", 3 },
+ { "QUANTUM LIGHTNING 730A", 3 },
+
+ { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
+ { "QUANTUM FIREBALL_640", 3 },
+ { "QUANTUM FIREBALL_1080", 3 },
+ { "QUANTUM FIREBALL_1280", 3 },
+ { NULL, 0 }
+};
+
+/**
+ * ide_scan_pio_blacklist - check for a blacklisted drive
+ * @model: Drive model string
+ *
+ * This routine searches the ide_pio_blacklist for an entry
+ * matching the start/whole of the supplied model name.
+ *
+ * Returns -1 if no match found.
+ * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
+ */
+
+int ide_scan_pio_blacklist(char *model)
+{
+ struct ide_pio_info *p;
+
+ for (p = ide_pio_blacklist; p->name != NULL; p++) {
+ if (strncmp(p->name, model, strlen(p->name)) == 0)
+ return p->pio;
+ }
+ return -1;
+}
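+
+/*
+ * The comparison is a prefix match, so entry order matters: a model of
+ * "WDC AC21200-32H" (suffix made up for illustration) hits the
+ * "WDC AC21200" entry (PIO 4) before the shorter "WDC AC2120" entry
+ * (PIO 0) is ever compared.
+ */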
diff --git a/windhoek/ide/ide-pm.c b/windhoek/ide/ide-pm.c
new file mode 100644
index 00000000..60538d9c
--- /dev/null
+++ b/windhoek/ide/ide-pm.c
@@ -0,0 +1,239 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+int generic_ide_suspend(struct device *dev, pm_message_t mesg)
+{
+ ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ struct request_pm_state rqpm;
+ ide_task_t args;
+ int ret;
+
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+
+ memset(&rqpm, 0, sizeof(rqpm));
+ memset(&args, 0, sizeof(args));
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_PM_SUSPEND;
+ rq->special = &args;
+ rq->data = &rqpm;
+ rqpm.pm_step = IDE_PM_START_SUSPEND;
+ if (mesg.event == PM_EVENT_PRETHAW)
+ mesg.event = PM_EVENT_FREEZE;
+ rqpm.pm_state = mesg.event;
+
+ ret = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ /* call ACPI _PS3 only after both devices are suspended */
+ if (ret == 0 && ((drive->dn & 1) || pair == NULL))
+ ide_acpi_set_state(hwif, 0);
+
+ return ret;
+}
+
+int generic_ide_resume(struct device *dev)
+{
+ ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ struct request_pm_state rqpm;
+ ide_task_t args;
+ int err;
+
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
+
+ ide_acpi_exec_tfs(drive);
+
+ memset(&rqpm, 0, sizeof(rqpm));
+ memset(&args, 0, sizeof(args));
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_PM_RESUME;
+ rq->cmd_flags |= REQ_PREEMPT;
+ rq->special = &args;
+ rq->data = &rqpm;
+ rqpm.pm_step = IDE_PM_START_RESUME;
+ rqpm.pm_state = PM_EVENT_ON;
+
+ err = blk_execute_rq(drive->queue, NULL, rq, 1);
+ blk_put_request(rq);
+
+ if (err == 0 && dev->driver) {
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (drv->resume)
+ drv->resume(drive);
+ }
+
+ return err;
+}
+
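+/*
+ * Step sequencing implemented below: a suspend request walks
+ * IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED (standby is
+ * skipped for PM_EVENT_FREEZE), while a resume request walks
+ * IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA ->
+ * IDE_PM_COMPLETED (ATAPI devices skip IDE_PM_IDLE).
+ */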
+void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+
+#ifdef DEBUG_PM
+ printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
+ drive->name, pm->pm_step);
+#endif
+ if (drive->media != ide_disk)
+ return;
+
+ switch (pm->pm_step) {
+ case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
+ if (pm->pm_state == PM_EVENT_FREEZE)
+ pm->pm_step = IDE_PM_COMPLETED;
+ else
+ pm->pm_step = IDE_PM_STANDBY;
+ break;
+ case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
+ pm->pm_step = IDE_PM_COMPLETED;
+ break;
+ case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
+ pm->pm_step = IDE_PM_IDLE;
+ break;
+ case IDE_PM_IDLE: /* Resume step 2 (idle)*/
+ pm->pm_step = IDE_PM_RESTORE_DMA;
+ break;
+ }
+}
+
+ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+ ide_task_t *args = rq->special;
+
+ memset(args, 0, sizeof(*args));
+
+ switch (pm->pm_step) {
+ case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
+ if (drive->media != ide_disk)
+ break;
+ /* Not supported? Switch to next step now. */
+ if (ata_id_flush_enabled(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
+ ide_complete_power_step(drive, rq);
+ return ide_stopped;
+ }
+ if (ata_id_flush_ext_enabled(drive->id))
+ args->tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ args->tf.command = ATA_CMD_FLUSH;
+ goto out_do_tf;
+ case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
+ args->tf.command = ATA_CMD_STANDBYNOW1;
+ goto out_do_tf;
+ case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
+ ide_set_max_pio(drive);
+ /*
+ * skip IDE_PM_IDLE for ATAPI devices
+ */
+ if (drive->media != ide_disk)
+ pm->pm_step = IDE_PM_RESTORE_DMA;
+ else
+ ide_complete_power_step(drive, rq);
+ return ide_stopped;
+ case IDE_PM_IDLE: /* Resume step 2 (idle) */
+ args->tf.command = ATA_CMD_IDLEIMMEDIATE;
+ goto out_do_tf;
+ case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
+ /*
+ * Right now, all we do is call ide_set_dma(drive),
+ * we could be smarter and check for current xfer_speed
+ * in struct drive etc...
+ */
+ if (drive->hwif->dma_ops == NULL)
+ break;
+ /*
+ * TODO: respect IDE_DFLAG_USING_DMA
+ */
+ ide_set_dma(drive);
+ break;
+ }
+
+ pm->pm_step = IDE_PM_COMPLETED;
+ return ide_stopped;
+
+out_do_tf:
+ args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ args->data_phase = TASKFILE_NO_DATA;
+ return do_rw_taskfile(drive, args);
+}
+
+/**
+ * ide_complete_pm_request - end the current Power Management request
+ * @drive: target drive
+ * @rq: request
+ *
+ * This function cleans up the current PM request and stops the queue
+ * if necessary.
+ */
+void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+#ifdef DEBUG_PM
+ printk("%s: completing PM request, %s\n", drive->name,
+ blk_pm_suspend_request(rq) ? "suspend" : "resume");
+#endif
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_pm_suspend_request(rq))
+ blk_stop_queue(q);
+ else
+ drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ drive->hwif->rq = NULL;
+
+ if (blk_end_request(rq, 0, 0))
+ BUG();
+}
+
+void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+
+ if (blk_pm_suspend_request(rq) &&
+ pm->pm_step == IDE_PM_START_SUSPEND)
+ /* Mark drive blocked when starting the suspend sequence. */
+ drive->dev_flags |= IDE_DFLAG_BLOCKED;
+ else if (blk_pm_resume_request(rq) &&
+ pm->pm_step == IDE_PM_START_RESUME) {
+ /*
+ * The first thing we do on wakeup is to wait for BSY bit to
+ * go away (with a looong timeout) as a drive on this hwif may
+ * just be POSTing itself.
+ * We do that before even selecting as the "other" device on
+ * the bus may be broken enough to walk on our toes at this
+ * point.
+ */
+ ide_hwif_t *hwif = drive->hwif;
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+ int rc;
+#ifdef DEBUG_PM
+ printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
+#endif
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
+ SELECT_DRIVE(drive);
+ hwif->tp_ops->set_irq(hwif, 1);
+ rc = ide_wait_not_busy(hwif, 100000);
+ if (rc)
+ printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
diff --git a/windhoek/ide/ide-pnp.c b/windhoek/ide/ide-pnp.c
new file mode 100644
index 00000000..bac9b392
--- /dev/null
+++ b/windhoek/ide/ide-pnp.c
@@ -0,0 +1,107 @@
+/*
+ * This file provides autodetection for ISA PnP IDE interfaces.
+ * It was tested with the "ESS ES1868 Plug and Play AudioDrive" IDE interface.
+ *
+ * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include <linux/ide.h>
+
+#define DRV_NAME "ide-pnp"
+
+/* Add your devices here :)) */
+static struct pnp_device_id idepnp_devices[] = {
+ /* Generic ESDI/IDE/ATA compatible hard disk controller */
+ {.id = "PNP0600", .driver_data = 0},
+ {.id = ""}
+};
+
+static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+{
+ struct ide_host *host;
+ unsigned long base, ctl;
+ int rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+
+ printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
+
+ if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
+ return -1;
+
+ base = pnp_port_start(dev, 0);
+ ctl = pnp_port_start(dev, 1);
+
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, base, ctl);
+ hw.irq = pnp_irq(dev, 0);
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, &host);
+ if (rc)
+ goto out;
+
+ pnp_set_drvdata(dev, host);
+
+ return 0;
+out:
+ release_region(ctl, 1);
+ release_region(base, 8);
+
+ return rc;
+}
+
+static void idepnp_remove(struct pnp_dev *dev)
+{
+ struct ide_host *host = pnp_get_drvdata(dev);
+
+ ide_host_remove(host);
+
+ release_region(pnp_port_start(dev, 1), 1);
+ release_region(pnp_port_start(dev, 0), 8);
+}
+
+static struct pnp_driver idepnp_driver = {
+ .name = "ide",
+ .id_table = idepnp_devices,
+ .probe = idepnp_probe,
+ .remove = idepnp_remove,
+};
+
+static int __init pnpide_init(void)
+{
+ return pnp_register_driver(&idepnp_driver);
+}
+
+static void __exit pnpide_exit(void)
+{
+ pnp_unregister_driver(&idepnp_driver);
+}
+
+module_init(pnpide_init);
+module_exit(pnpide_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-probe.c b/windhoek/ide/ide-probe.c
new file mode 100644
index 00000000..8ac2aee8
--- /dev/null
+++ b/windhoek/ide/ide-probe.c
@@ -0,0 +1,1739 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * See linux/MAINTAINERS for address of current maintainer.
+ *
+ * This is the IDE probe module, as evolved from hd.c and ide.c.
+ *
+ * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
+ * by Andrea Arcangeli
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/spinlock.h>
+#include <linux/kmod.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+/**
+ * generic_id - add a generic drive id
+ * @drive: drive to make an ID block for
+ *
+ * Add a fake id field to the drive we are passed. This allows
+ * use to skip a ton of NULL checks (which people always miss)
+ * and make drive properties unconditional outside of this file
+ */
+
+static void generic_id(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ id[ATA_ID_CUR_CYLS] = id[ATA_ID_CYLS] = drive->cyl;
+ id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head;
+ id[ATA_ID_CUR_SECTORS] = id[ATA_ID_SECTORS] = drive->sect;
+}
+
+static void ide_disk_init_chs(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ /* Extract geometry if we did not already have one for the drive */
+ if (!drive->cyl || !drive->head || !drive->sect) {
+ drive->cyl = drive->bios_cyl = id[ATA_ID_CYLS];
+ drive->head = drive->bios_head = id[ATA_ID_HEADS];
+ drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
+ }
+
+ /* Handle logical geometry translation by the drive */
+ if (ata_id_current_chs_valid(id)) {
+ drive->cyl = id[ATA_ID_CUR_CYLS];
+ drive->head = id[ATA_ID_CUR_HEADS];
+ drive->sect = id[ATA_ID_CUR_SECTORS];
+ }
+
+ /* Use physical geometry if what we have still makes no sense */
+ if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
+ drive->cyl = id[ATA_ID_CYLS];
+ drive->head = id[ATA_ID_HEADS];
+ drive->sect = id[ATA_ID_SECTORS];
+ }
+}
+
+static void ide_disk_init_mult_count(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;
+
+ if (max_multsect) {
+ if ((max_multsect / 2) > 1)
+ id[ATA_ID_MULTSECT] = max_multsect | 0x100;
+ else
+ id[ATA_ID_MULTSECT] &= ~0x1ff;
+
+ drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
+
+ if (drive->mult_req)
+ drive->special.b.set_multmode = 1;
+ }
+}
+
+static void ide_classify_ata_dev(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ int is_cfa = ata_id_is_cfa(id);
+
+ /* CF devices are *not* removable in Linux definition of the term */
+ if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+
+ drive->media = ide_disk;
+
+ if (!ata_id_has_unload(drive->id))
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+
+ printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
+ is_cfa ? "CFA" : "ATA");
+}
+
+static void ide_classify_atapi_dev(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;
+
+ printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
+ switch (type) {
+ case ide_floppy:
+ if (!strstr(m, "CD-ROM")) {
+ if (!strstr(m, "oppy") &&
+ !strstr(m, "poyp") &&
+ !strstr(m, "ZIP"))
+ printk(KERN_CONT "cdrom or floppy?, assuming ");
+ if (drive->media != ide_cdrom) {
+ printk(KERN_CONT "FLOPPY");
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+ break;
+ }
+ }
+ /* Early cdrom models used zero */
+ type = ide_cdrom;
+ case ide_cdrom:
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+#ifdef CONFIG_PPC
+ /* kludge for Apple PowerBook internal zip */
+ if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
+ printk(KERN_CONT "FLOPPY");
+ type = ide_floppy;
+ break;
+ }
+#endif
+ printk(KERN_CONT "CD/DVD-ROM");
+ break;
+ case ide_tape:
+ printk(KERN_CONT "TAPE");
+ break;
+ case ide_optical:
+ printk(KERN_CONT "OPTICAL");
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+ break;
+ default:
+ printk(KERN_CONT "UNKNOWN (type %d)", type);
+ break;
+ }
+
+ printk(KERN_CONT " drive\n");
+ drive->media = type;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ if (ata_id_cdb_intr(id))
+ drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
+ drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
+ /* we don't do head unloading on ATAPI devices */
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+}
+
+/**
+ * do_identify - identify a drive
+ * @drive: drive to identify
+ * @cmd: command used
+ *
+ * Called when we have issued a drive identify command to
+ * read and parse the results. This function is run with
+ * interrupts disabled.
+ */
+
+static void do_identify(ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ unsigned long flags;
+ int bswap = 1;
+
+ /* local CPU only; some systems need this */
+ local_irq_save(flags);
+ /* read 512 bytes of id info */
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+ local_irq_restore(flags);
+
+ drive->dev_flags |= IDE_DFLAG_ID_READ;
+#ifdef DEBUG
+ printk(KERN_INFO "%s: dumping identify data\n", drive->name);
+ ide_dump_identify((u8 *)id);
+#endif
+ ide_fix_driveid(id);
+
+ /*
+ * ATA_CMD_ID_ATA returns little-endian info,
+ * ATA_CMD_ID_ATAPI *usually* returns little-endian info.
+ */
+ if (cmd == ATA_CMD_ID_ATAPI) {
+ if ((m[0] == 'N' && m[1] == 'E') || /* NEC */
+ (m[0] == 'F' && m[1] == 'X') || /* Mitsumi */
+ (m[0] == 'P' && m[1] == 'i')) /* Pioneer */
+ /* Vertos drives may still be weird */
+ bswap ^= 1;
+ }
+
+ ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
+ ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
+ ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);
+
+ /* we depend on this a lot! */
+ m[ATA_ID_PROD_LEN - 1] = '\0';
+
+ if (strstr(m, "E X A B Y T E N E S T"))
+ goto err_misc;
+
+ drive->dev_flags |= IDE_DFLAG_PRESENT;
+ drive->dev_flags &= ~IDE_DFLAG_DEAD;
+
+ /*
+ * Check for an ATAPI device
+ */
+ if (cmd == ATA_CMD_ID_ATAPI)
+ ide_classify_atapi_dev(drive);
+ else
+ /*
+ * Not an ATAPI device: looks like a "regular" hard disk
+ */
+ ide_classify_ata_dev(drive);
+ return;
+err_misc:
+ kfree(id);
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+}
+
+/**
+ * actual_try_to_identify - send ata/atapi identify
+ * @drive: drive to identify
+ * @cmd: command to use
+ *
+ * Sends an ATA(PI) IDENTIFY request to a drive
+ * and waits for a response. It also monitors irqs while this is
+ * happening, in hope of automatically determining which one is
+ * being used by the interface.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ */
+
+static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int use_altstatus = 0, rc;
+ unsigned long timeout;
+ u8 s = 0, a = 0;
+
+ /* take a deep breath */
+ msleep(50);
+
+ if (io_ports->ctl_addr &&
+ (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
+ a = tp_ops->read_altstatus(hwif);
+ s = tp_ops->read_status(hwif);
+ if ((a ^ s) & ~ATA_IDX)
+ /* ancient Seagate drives, broken interfaces */
+ printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
+ "instead of ALTSTATUS(0x%02x)\n",
+ drive->name, s, a);
+ else
+ /* use non-intrusive polling */
+ use_altstatus = 1;
+ }
+
+ /* set features register for atapi
+ * identify command to be sure of reply
+ */
+ if (cmd == ATA_CMD_ID_ATAPI) {
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ /* disable DMA & overlap */
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE;
+
+ tp_ops->tf_load(drive, &task);
+ }
+
+ /* ask drive for ID */
+ tp_ops->exec_command(hwif, cmd);
+
+ timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
+
+ if (ide_busy_sleep(hwif, timeout, use_altstatus))
+ return 1;
+
+ /* wait for IRQ and ATA_DRQ */
+ msleep(50);
+ s = tp_ops->read_status(hwif);
+
+ if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
+ /* drive returned ID */
+ do_identify(drive, cmd);
+ /* drive responded with ID */
+ rc = 0;
+ /* clear drive IRQ */
+ (void)tp_ops->read_status(hwif);
+ } else {
+ /* drive refused ID */
+ rc = 2;
+ }
+ return rc;
+}
+
+/**
+ * try_to_identify - try to identify a drive
+ * @drive: drive to probe
+ * @cmd: command to use
+ *
+ * Issue the identify command and then do IRQ probing to
+ * complete the identification when needed by finding the
+ * IRQ the drive is attached to
+ */
+
+static int try_to_identify (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int retval;
+ int autoprobe = 0;
+ unsigned long cookie = 0;
+
+ /*
+ * Disable device irq unless we need to
+ * probe for it. Otherwise we'll get spurious
+ * interrupts during the identify-phase that
+ * the irq handler isn't expecting.
+ */
+ if (hwif->io_ports.ctl_addr) {
+ if (!hwif->irq) {
+ autoprobe = 1;
+ cookie = probe_irq_on();
+ }
+ tp_ops->set_irq(hwif, autoprobe);
+ }
+
+ retval = actual_try_to_identify(drive, cmd);
+
+ if (autoprobe) {
+ int irq;
+
+ tp_ops->set_irq(hwif, 0);
+ /* clear drive IRQ */
+ (void)tp_ops->read_status(hwif);
+ udelay(5);
+ irq = probe_irq_off(cookie);
+ if (!hwif->irq) {
+ if (irq > 0) {
+ hwif->irq = irq;
+ } else {
+ /* Mmmm.. multiple IRQs..
+ * don't know which was ours
+ */
+ printk(KERN_ERR "%s: IRQ probe failed (0x%lx)\n",
+ drive->name, cookie);
+ }
+ }
+ }
+ return retval;
+}
+
+int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
+{
+ u8 stat;
+
+ timeout += jiffies;
+
+ do {
+ msleep(50); /* give drive a breather */
+ stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
+ : hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return 1; /* drive timed-out */
+}
+
+static u8 ide_read_device(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_DEVICE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.device;
+}
+
+/**
+ * do_probe - probe an IDE device
+ * @drive: drive to probe
+ * @cmd: command to use
+ *
+ * do_probe() has the difficult job of finding a drive if it exists,
+ * without getting hung up if it doesn't exist, without trampling on
+ * ethernet cards, and without leaving any IRQs dangling to haunt us later.
+ *
+ * If a drive is "known" to exist (from CMOS or kernel parameters),
+ * but does not respond right away, the probe will "hang in there"
+ * for the maximum wait time (about 30 seconds), otherwise it will
+ * exit much more quickly.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ * 3 bad status from device (possible for ATAPI drives)
+ * 4 probe was not attempted because failure was obvious
+ */
+
+static int do_probe (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int rc;
+ u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;
+
+ /* avoid waiting for inappropriate probes */
+ if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
+ return 4;
+
+#ifdef DEBUG
+ printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
+ drive->name, present, drive->media,
+ (cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
+#endif
+
+ /* needed for some systems
+ * (e.g. crw9624 as drive0 with disk as slave)
+ */
+ msleep(50);
+ SELECT_DRIVE(drive);
+ msleep(50);
+
+ if (ide_read_device(drive) != drive->select && present == 0) {
+ if (drive->dn & 1) {
+ /* exit with drive0 selected */
+ SELECT_DRIVE(hwif->devices[0]);
+ /* allow ATA_BUSY to assert & clear */
+ msleep(50);
+ }
+ /* no i/f present: mmm.. this should be a 4 -ml */
+ return 3;
+ }
+
+ stat = tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
+ present || cmd == ATA_CMD_ID_ATAPI) {
+ /* send cmd and wait */
+ if ((rc = try_to_identify(drive, cmd))) {
+ /* failed: try again */
+ rc = try_to_identify(drive,cmd);
+ }
+
+ stat = tp_ops->read_status(hwif);
+
+ if (stat == (ATA_BUSY | ATA_DRDY))
+ return 4;
+
+ if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
+ printk(KERN_ERR "%s: no response (status = 0x%02x), "
+ "resetting drive\n", drive->name, stat);
+ msleep(50);
+ SELECT_DRIVE(drive);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
+ (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0);
+ rc = try_to_identify(drive, cmd);
+ }
+
+ /* ensure drive IRQ is clear */
+ stat = tp_ops->read_status(hwif);
+
+ if (rc == 1)
+ printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
+ drive->name, stat);
+ } else {
+ /* not present or maybe ATAPI */
+ rc = 3;
+ }
+ if (drive->dn & 1) {
+ /* exit with drive0 selected */
+ SELECT_DRIVE(hwif->devices[0]);
+ msleep(50);
+ /* ensure drive irq is clear */
+ (void)tp_ops->read_status(hwif);
+ }
+ return rc;
+}
+
+/*
+ * enable_nest - enable a device attached behind an EXABYTE Nest
+ * @drive: drive that identified itself as "E X A B Y T E N E S T"
+ *
+ * Issue the vendor-specific ENABLE NEST command so that the device hidden
+ * behind the Exabyte Nest becomes visible; probe_for_drive() then retries
+ * the identification.
+ */
+static void enable_nest (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u8 stat;
+
+ printk(KERN_INFO "%s: enabling %s -- ",
+ hwif->name, (char *)&drive->id[ATA_ID_PROD]);
+
+ SELECT_DRIVE(drive);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_EXABYTE_ENABLE_NEST);
+
+ if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 0)) {
+ printk(KERN_CONT "failed (timeout)\n");
+ return;
+ }
+
+ msleep(50);
+
+ stat = tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, 0, BAD_STAT))
+ printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
+ else
+ printk(KERN_CONT "success\n");
+}
+
+/**
+ * probe_for_drive - upper level drive probe
+ * @drive: drive to probe for
+ *
+ * probe_for_drive() tests for existence of a given drive using do_probe()
+ * and presents things to the user as needed.
+ *
+ * Returns: 0 no device was found
+ * 1 device was found
+ * (note: IDE_DFLAG_PRESENT might still not be set)
+ */
+
+static u8 probe_for_drive(ide_drive_t *drive)
+{
+ char *m;
+
+ /*
+ * In order to keep things simple we have an id
+ * block for all drives at all times. If the device
+ * is pre ATA or refuses ATA/ATAPI identify we
+ * will add faked data to this.
+ *
+ * Also note that 0 everywhere means "can't do X"
+ */
+
+ drive->dev_flags &= ~IDE_DFLAG_ID_READ;
+
+ drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (drive->id == NULL) {
+ printk(KERN_ERR "ide: out of memory for id data.\n");
+ return 0;
+ }
+
+ m = (char *)&drive->id[ATA_ID_PROD];
+ strcpy(m, "UNKNOWN");
+
+ /* skip probing? */
+ if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
+retry:
+ /* if !(success||timed-out) */
+ if (do_probe(drive, ATA_CMD_ID_ATA) >= 2)
+ /* look for ATAPI device */
+ (void)do_probe(drive, ATA_CMD_ID_ATAPI);
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ /* drive not found */
+ return 0;
+
+ if (strstr(m, "E X A B Y T E N E S T")) {
+ enable_nest(drive);
+ goto retry;
+ }
+
+ /* identification failed? */
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
+ if (drive->media == ide_disk) {
+ printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
+ drive->name, drive->cyl,
+ drive->head, drive->sect);
+ } else if (drive->media == ide_cdrom) {
+ printk(KERN_INFO "%s: ATAPI cdrom (?)\n", drive->name);
+ } else {
+ /* nuke it */
+ printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ }
+ }
+ /* drive was found */
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return 0;
+
+ /* The drive wasn't being helpful. Add generic info only */
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
+ generic_id(drive);
+ return 1;
+ }
+
+ if (drive->media == ide_disk) {
+ ide_disk_init_chs(drive);
+ ide_disk_init_mult_count(drive);
+ }
+
+ return !!(drive->dev_flags & IDE_DFLAG_PRESENT);
+}
+
+static void hwif_release_dev(struct device *dev)
+{
+ ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
+
+ complete(&hwif->gendev_rel_comp);
+}
+
+static int ide_register_port(ide_hwif_t *hwif)
+{
+ int ret;
+
+ /* register with global device tree */
+ dev_set_name(&hwif->gendev, hwif->name);
+ hwif->gendev.driver_data = hwif;
+ if (hwif->gendev.parent == NULL)
+ hwif->gendev.parent = hwif->dev;
+ hwif->gendev.release = hwif_release_dev;
+
+ ret = device_register(&hwif->gendev);
+ if (ret < 0) {
+ printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ hwif->portdev = device_create(ide_port_class, &hwif->gendev,
+ MKDEV(0, 0), hwif, hwif->name);
+ if (IS_ERR(hwif->portdev)) {
+ ret = PTR_ERR(hwif->portdev);
+ device_unregister(&hwif->gendev);
+ }
+out:
+ return ret;
+}
+
+/**
+ * ide_port_wait_ready - wait for port to become ready
+ * @hwif: IDE port
+ *
+ * This is needed on some PPCs and a bunch of BIOS-less embedded
+ * platforms. Typical cases are:
+ *
+ * - The firmware hard reset the disk before booting the kernel and
+ * the drive is still doing its power-on reset sequence, which
+ * can take up to 30 seconds.
+ *
+ * - The firmware does nothing (or no firmware), the device is
+ * still in POST state (same as above actually).
+ *
+ * - Some CD/DVD/Writer combo drives tend to drive the bus during
+ * their reset sequence even when they are non-selected slave
+ * devices, thus preventing discovery of the main HD.
+ *
+ * Doing this wait-for-non-busy should not harm any existing
+ * configuration and fix some issues like the above.
+ *
+ * BenH.
+ *
+ * Returns 0 on success, error code (< 0) otherwise.
+ */
+
+static int ide_port_wait_ready(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i, rc;
+
+ printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);
+
+ /* Let HW settle down a bit from whatever init state we
+ * come from */
+ mdelay(2);
+
+ /* Wait for the BSY bit to go away. The spec timeout is 30 seconds;
+ * I know of at least one disk that takes 31 seconds, so I use 35
+ * here to be safe.
+ */
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ return rc;
+
+ /* Now make sure both master & slave are ready */
+ ide_port_for_each_dev(i, drive, hwif) {
+ /* Ignore disks that we will not probe for later. */
+ if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_PRESENT)) {
+ SELECT_DRIVE(drive);
+ hwif->tp_ops->set_irq(hwif, 1);
+ mdelay(2);
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ goto out;
+ } else
+ printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
+ drive->name);
+ }
+out:
+ /* Exit function with master reselected (let's be sane) */
+ if (i)
+ SELECT_DRIVE(hwif->devices[0]);
+
+ return rc;
+}
+
+/**
+ * ide_undecoded_slave - look for bad CF adapters
+ * @dev1: slave device
+ *
+ * Analyse the drives on the interface and attempt to decide if we
+ * have the same drive viewed twice. This occurs with crap CF adapters
+ * and PCMCIA sometimes.
+ */
+
+void ide_undecoded_slave(ide_drive_t *dev1)
+{
+ ide_drive_t *dev0 = dev1->hwif->devices[0];
+
+ if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return;
+
+ /* If the models don't match they are not the same product */
+ if (strcmp((char *)&dev0->id[ATA_ID_PROD],
+ (char *)&dev1->id[ATA_ID_PROD]))
+ return;
+
+ /* Serial numbers do not match */
+ if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
+ (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
+ return;
+
+ /* No serial number, thankfully very rare for CF */
+ if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
+ return;
+
+ /* Appears to be an IDE flash adapter with decode bugs */
+ printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");
+
+ dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
+}
+
+EXPORT_SYMBOL_GPL(ide_undecoded_slave);
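+/*
+ * Illustrative only (structure name below is made up): a CF/PCMCIA host
+ * driver is expected to hook this helper up through its port operations,
+ *
+ *	static const struct ide_port_ops my_cf_port_ops = {
+ *		.quirkproc	= ide_undecoded_slave,
+ *	};
+ *
+ * so that ide_port_tune_devices() below invokes it for every drive that
+ * was found present.
+ */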
+
+static int ide_probe_port(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ unsigned long flags;
+ unsigned int irqd;
+ int i, rc = -ENODEV;
+
+ BUG_ON(hwif->present);
+
+ if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
+ (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
+ return -EACCES;
+
+ /*
+ * We must always disable IRQ, as probe_for_drive will assert IRQ, but
+ * we'll install our IRQ driver much later...
+ */
+ irqd = hwif->irq;
+ if (irqd)
+ disable_irq(hwif->irq);
+
+ local_save_flags(flags);
+ local_irq_enable_in_hardirq();
+
+ if (ide_port_wait_ready(hwif) == -EBUSY)
+ printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
+
+ /*
+ * Second drive should only exist if first drive was found,
+ * but a lot of cdrom drives are configured as single slaves.
+ */
+ ide_port_for_each_dev(i, drive, hwif) {
+ (void) probe_for_drive(drive);
+ if (drive->dev_flags & IDE_DFLAG_PRESENT)
+ rc = 0;
+ }
+
+ local_irq_restore(flags);
+
+ /*
+ * Use cached IRQ number. It might be (and is...) changed by probe
+ * code above
+ */
+ if (irqd)
+ enable_irq(irqd);
+
+ return rc;
+}
+
+static void ide_port_tune_devices(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ if (port_ops && port_ops->quirkproc)
+ port_ops->quirkproc(drive);
+ }
+ }
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ ide_set_max_pio(drive);
+
+ drive->dev_flags |= IDE_DFLAG_NICE1;
+
+ if (hwif->dma_ops)
+ ide_set_dma(drive);
+ }
+ }
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
+ drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT;
+ }
+}
+
+/*
+ * init request queue
+ */
+static int ide_init_queue(ide_drive_t *drive)
+{
+ struct request_queue *q;
+ ide_hwif_t *hwif = drive->hwif;
+ int max_sectors = 256;
+ int max_sg_entries = PRD_ENTRIES;
+
+ /*
+ * Our default setup assumes the normal IDE case, that is 64K
+ * segmenting, a standard PRD setup and LBA28. Some drivers then
+ * impose their own limits; for LBA48 we could raise the limit but
+ * as yet do not.
+ */
+
+ q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
+ if (!q)
+ return 1;
+
+ q->queuedata = drive;
+ blk_queue_segment_boundary(q, 0xffff);
+
+ if (hwif->rqsize < max_sectors)
+ max_sectors = hwif->rqsize;
+ blk_queue_max_sectors(q, max_sectors);
+
+#ifdef CONFIG_PCI
+ /* When we have an IOMMU, we may have a problem where pci_map_sg()
+ * creates segments that don't completely match our boundary
+ * requirements and thus need to be broken up again. Because it
+ * doesn't align properly either, we may actually have to break things
+ * up into more segments than we got in the first place; the worst
+ * case is twice as many.
+ * This will be fixed once we teach pci_map_sg() about our boundary
+ * requirements, hopefully soon. *FIXME*
+ */
+ if (!PCI_DMA_BUS_IS_PHYS)
+ max_sg_entries >>= 1;
+#endif /* CONFIG_PCI */
+
+ blk_queue_max_hw_segments(q, max_sg_entries);
+ blk_queue_max_phys_segments(q, max_sg_entries);
+
+ /* assign drive queue */
+ drive->queue = q;
+
+ /* needs drive->queue to be set */
+ ide_toggle_bounce(drive, 1);
+
+ return 0;
+}
+
+static DEFINE_MUTEX(ide_cfg_mtx);
+
+/*
+ * For any present drive:
+ * - allocate the block device queue
+ */
+static int ide_port_setup_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i, j = 0;
+
+ mutex_lock(&ide_cfg_mtx);
+ ide_port_for_each_dev(i, drive, hwif) {
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ continue;
+
+ if (ide_init_queue(drive)) {
+ printk(KERN_ERR "ide: failed to init %s\n",
+ drive->name);
+ kfree(drive->id);
+ drive->id = NULL;
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ continue;
+ }
+
+ j++;
+ }
+ mutex_unlock(&ide_cfg_mtx);
+
+ return j;
+}
+
+/*
+ * This routine sets up the IRQ for an IDE interface.
+ */
+static int init_irq (ide_hwif_t *hwif)
+{
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ irq_handler_t irq_handler;
+ int sa = 0;
+
+ mutex_lock(&ide_cfg_mtx);
+ spin_lock_init(&hwif->lock);
+
+ init_timer(&hwif->timer);
+ hwif->timer.function = &ide_timer_expiry;
+ hwif->timer.data = (unsigned long)hwif;
+
+ irq_handler = hwif->host->irq_handler;
+ if (irq_handler == NULL)
+ irq_handler = ide_intr;
+
+#if defined(__mc68000__)
+ sa = IRQF_SHARED;
+#endif /* __mc68000__ */
+
+ if (hwif->chipset == ide_pci)
+ sa = IRQF_SHARED;
+
+ if (io_ports->ctl_addr)
+ hwif->tp_ops->set_irq(hwif, 1);
+
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
+
+ if (!hwif->rqsize) {
+ if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
+ (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
+ hwif->rqsize = 256;
+ else
+ hwif->rqsize = 65536;
+ }
+
+#if !defined(__mc68000__)
+ printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
+ io_ports->data_addr, io_ports->status_addr,
+ io_ports->ctl_addr, hwif->irq);
+#else
+ printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
+ io_ports->data_addr, hwif->irq);
+#endif /* __mc68000__ */
+ if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
+ printk(KERN_CONT " (serialized)");
+ printk(KERN_CONT "\n");
+
+ mutex_unlock(&ide_cfg_mtx);
+ return 0;
+out_up:
+ mutex_unlock(&ide_cfg_mtx);
+ return 1;
+}
+
+static int ata_lock(dev_t dev, void *data)
+{
+ /* FIXME: we want to pin hwif down */
+ return 0;
+}
+
+static struct kobject *ata_probe(dev_t dev, int *part, void *data)
+{
+ ide_hwif_t *hwif = data;
+ int unit = *part >> PARTN_BITS;
+ ide_drive_t *drive = hwif->devices[unit];
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return NULL;
+
+ if (drive->media == ide_disk)
+ request_module("ide-disk");
+ if (drive->media == ide_cdrom || drive->media == ide_optical)
+ request_module("ide-cd");
+ if (drive->media == ide_tape)
+ request_module("ide-tape");
+ if (drive->media == ide_floppy)
+ request_module("ide-floppy");
+
+ return NULL;
+}
+
+static struct kobject *exact_match(dev_t dev, int *part, void *data)
+{
+ struct gendisk *p = data;
+ *part &= (1 << PARTN_BITS) - 1;
+ return &disk_to_dev(p)->kobj;
+}
+
+static int exact_lock(dev_t dev, void *data)
+{
+ struct gendisk *p = data;
+
+ if (!get_disk(p))
+ return -1;
+ return 0;
+}
+
+void ide_register_region(struct gendisk *disk)
+{
+ blk_register_region(MKDEV(disk->major, disk->first_minor),
+ disk->minors, NULL, exact_match, exact_lock, disk);
+}
+
+EXPORT_SYMBOL_GPL(ide_register_region);
+
+void ide_unregister_region(struct gendisk *disk)
+{
+ blk_unregister_region(MKDEV(disk->major, disk->first_minor),
+ disk->minors);
+}
+
+EXPORT_SYMBOL_GPL(ide_unregister_region);
+
+void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int unit = drive->dn & 1;
+
+ disk->major = hwif->major;
+ disk->first_minor = unit << PARTN_BITS;
+ sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
+ DEBUG_MSG("disk has a name: %s", disk->disk_name);
+ disk->queue = drive->queue;
+}
+
+EXPORT_SYMBOL_GPL(ide_init_disk);
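+/*
+ * Rough sketch (added for illustration; not from the original source) of
+ * how a media driver pairs this helper with the gendisk API:
+ *
+ *	struct gendisk *g = alloc_disk_node(1 << PARTN_BITS,
+ *					    hwif_to_node(drive->hwif));
+ *	if (g) {
+ *		ide_init_disk(g, drive);
+ *		g->private_data = drive;
+ *		add_disk(g);
+ *	}
+ */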
+
+static void drive_release_dev (struct device *dev)
+{
+ ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
+ ide_hwif_t *hwif = drive->hwif;
+
+ ide_proc_unregister_device(drive);
+
+ spin_lock_irq(&hwif->lock);
+ kfree(drive->id);
+ drive->id = NULL;
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ /* Messed up locking ... */
+ spin_unlock_irq(&hwif->lock);
+ blk_cleanup_queue(drive->queue);
+ spin_lock_irq(&hwif->lock);
+ drive->queue = NULL;
+ spin_unlock_irq(&hwif->lock);
+
+ complete(&drive->gendev_rel_comp);
+}
+
+static int hwif_init(ide_hwif_t *hwif)
+{
+ int old_irq;
+
+ if (!hwif->irq) {
+ hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
+ printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
+ return 0;
+ }
+ }
+
+ if (register_blkdev(hwif->major, hwif->name))
+ return 0;
+
+ if (!hwif->sg_max_nents)
+ hwif->sg_max_nents = PRD_ENTRIES;
+
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+ GFP_KERNEL);
+ if (!hwif->sg_table) {
+ printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
+ goto out;
+ }
+
+ sg_init_table(hwif->sg_table, hwif->sg_max_nents);
+
+ if (init_irq(hwif) == 0)
+ goto done;
+
+ old_irq = hwif->irq;
+ /*
+ * It failed to initialise. Find the default IRQ for
+ * this port and try that.
+ */
+ hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
+ printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
+ hwif->name, old_irq);
+ goto out;
+ }
+ if (init_irq(hwif)) {
+ printk(KERN_ERR "%s: probed IRQ %d and default IRQ %d failed\n",
+ hwif->name, old_irq, hwif->irq);
+ goto out;
+ }
+ printk(KERN_WARNING "%s: probed IRQ %d failed, using default\n",
+ hwif->name, hwif->irq);
+
+done:
+ blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
+ THIS_MODULE, ata_probe, ata_lock, hwif);
+ return 1;
+
+out:
+ unregister_blkdev(hwif->major, hwif->name);
+ return 0;
+}
+
+static void hwif_register_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ unsigned int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ struct device *dev = &drive->gendev;
+ int ret;
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ continue;
+
+ dev_set_name(dev, "%u.%u", hwif->index, i);
+ dev->parent = &hwif->gendev;
+ dev->bus = &ide_bus_type;
+ dev->driver_data = drive;
+ dev->release = drive_release_dev;
+
+ ret = device_register(dev);
+ if (ret < 0)
+ printk(KERN_WARNING "IDE: %s: device_register error: "
+ "%d\n", __func__, ret);
+ }
+}
+
+static void ide_port_init_devices(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ drive->dn = i + hwif->channel * 2;
+
+ if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
+ drive->io_32bit = 1;
+ if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
+ drive->dev_flags |= IDE_DFLAG_UNMASK;
+ if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
+ drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
+
+ if (port_ops && port_ops->init_dev)
+ port_ops->init_dev(drive);
+ }
+}
+
+static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
+ const struct ide_port_info *d)
+{
+ hwif->channel = port;
+
+ if (d->chipset)
+ hwif->chipset = d->chipset;
+
+ if (d->init_iops)
+ d->init_iops(hwif);
+
+ if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
+ (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
+ hwif->irq = port ? 15 : 14;
+
+ /* ->host_flags may be set by ->init_iops (or even earlier...) */
+ hwif->host_flags |= d->host_flags;
+ hwif->pio_mask = d->pio_mask;
+
+ if (d->tp_ops)
+ hwif->tp_ops = d->tp_ops;
+
+ /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
+ if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
+ hwif->port_ops = d->port_ops;
+
+ hwif->swdma_mask = d->swdma_mask;
+ hwif->mwdma_mask = d->mwdma_mask;
+ hwif->ultra_mask = d->udma_mask;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ int rc;
+
+ hwif->dma_ops = d->dma_ops;
+
+ if (d->init_dma)
+ rc = d->init_dma(hwif, d);
+ else
+ rc = ide_hwif_setup_dma(hwif, d);
+
+ if (rc < 0) {
+ printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+
+ hwif->dma_ops = NULL;
+ hwif->dma_base = 0;
+ hwif->swdma_mask = 0;
+ hwif->mwdma_mask = 0;
+ hwif->ultra_mask = 0;
+ }
+ }
+
+ if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
+ ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
+ hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
+
+ if (d->max_sectors)
+ hwif->rqsize = d->max_sectors;
+
+ /* call chipset specific routine for each enabled port */
+ if (d->init_hwif)
+ d->init_hwif(hwif);
+}
+
+static void ide_port_cable_detect(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
+ if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+ hwif->cbl = port_ops->cable_detect(hwif);
+ }
+}
+
+static const u8 ide_hwif_to_major[] =
+ { IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
+ IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
+
+static void ide_port_init_devices_data(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ u8 j = (hwif->index * MAX_DRIVES) + i;
+
+ memset(drive, 0, sizeof(*drive));
+
+ drive->media = ide_disk;
+ drive->select = (i << 4) | ATA_DEVICE_OBS;
+ drive->hwif = hwif;
+ drive->ready_stat = ATA_DRDY;
+ drive->bad_wstat = BAD_W_STAT;
+ drive->special.b.recalibrate = 1;
+ drive->special.b.set_geometry = 1;
+ drive->name[0] = 'h';
+ drive->name[1] = 'd';
+ drive->name[2] = 'a' + j;
+ drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
+
+ INIT_LIST_HEAD(&drive->list);
+ init_completion(&drive->gendev_rel_comp);
+ }
+}
+
+static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
+{
+ /* fill in any non-zero initial values */
+ hwif->index = index;
+ hwif->major = ide_hwif_to_major[index];
+
+ hwif->name[0] = 'i';
+ hwif->name[1] = 'd';
+ hwif->name[2] = 'e';
+ hwif->name[3] = '0' + index;
+
+ init_completion(&hwif->gendev_rel_comp);
+
+ hwif->tp_ops = &default_tp_ops;
+
+ ide_port_init_devices_data(hwif);
+}
+
+static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
+{
+ memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
+ hwif->irq = hw->irq;
+ hwif->chipset = hw->chipset;
+ hwif->dev = hw->dev;
+ hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
+ hwif->ack_intr = hw->ack_intr;
+ hwif->config_data = hw->config;
+}
+
+static unsigned int ide_indexes;
+
+/**
+ * ide_find_port_slot - find free port slot
+ * @d: IDE port info
+ *
+ * Return the new port slot index or -ENOENT if we are out of free slots.
+ */
+
+static int ide_find_port_slot(const struct ide_port_info *d)
+{
+ int idx = -ENOENT;
+ u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
+ u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;
+
+ /*
+ * Claim an unassigned slot.
+ *
+ * Give preference to claiming other slots before claiming ide0/ide1,
+ * just in case there's another interface yet-to-be-scanned
+ * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ *
+ * Unless there is a bootable card that does not use the standard
+ * ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ */
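+ /*
+ * Example (added note): with no slots claimed yet (ide_indexes == 0),
+ * a bootable host gets slot 0 (or slot 1 if IDE_HFLAG_QD_2ND_PORT is
+ * set), while a non-bootable host is pushed to slot 2 so that ide0 and
+ * ide1 stay free for the legacy I/O-port defaults.
+ */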
+ mutex_lock(&ide_cfg_mtx);
+ if (bootable) {
+ if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | i);
+ } else {
+ if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | 3);
+ else if ((ide_indexes & 3) != 3)
+ idx = ffz(ide_indexes);
+ }
+ if (idx >= 0)
+ ide_indexes |= (1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+
+ return idx;
+}
+
+static void ide_free_port_slot(int idx)
+{
+ mutex_lock(&ide_cfg_mtx);
+ ide_indexes &= ~(1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+}
+
+static void ide_port_free_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif)
+ kfree(drive);
+}
+
+static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
+{
+ int i;
+
+ for (i = 0; i < MAX_DRIVES; i++) {
+ ide_drive_t *drive;
+
+ drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
+ if (drive == NULL)
+ goto out_nomem;
+
+ hwif->devices[i] = drive;
+ }
+ return 0;
+
+out_nomem:
+ ide_port_free_devices(hwif);
+ return -ENOMEM;
+}
+
+struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
+{
+ struct ide_host *host;
+ struct device *dev = hws[0] ? hws[0]->dev : NULL;
+ int node = dev ? dev_to_node(dev) : -1;
+ int i;
+
+ host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
+ if (host == NULL)
+ return NULL;
+
+ for (i = 0; i < MAX_HOST_PORTS; i++) {
+ ide_hwif_t *hwif;
+ int idx;
+
+ if (hws[i] == NULL)
+ continue;
+
+ hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
+ if (hwif == NULL)
+ continue;
+
+ if (ide_port_alloc_devices(hwif, node) < 0) {
+ kfree(hwif);
+ continue;
+ }
+
+ idx = ide_find_port_slot(d);
+ if (idx < 0) {
+ printk(KERN_ERR "%s: no free slot for interface\n",
+ d ? d->name : "ide");
+ kfree(hwif);
+ continue;
+ }
+
+ ide_init_port_data(hwif, idx);
+
+ hwif->host = host;
+
+ host->ports[i] = hwif;
+ host->n_ports++;
+ }
+
+ if (host->n_ports == 0) {
+ kfree(host);
+ return NULL;
+ }
+
+ host->dev[0] = dev;
+
+ if (d) {
+ host->init_chipset = d->init_chipset;
+ host->host_flags = d->host_flags;
+ }
+
+ return host;
+}
+EXPORT_SYMBOL_GPL(ide_host_alloc);
+
+static void ide_port_free(ide_hwif_t *hwif)
+{
+ ide_port_free_devices(hwif);
+ ide_free_port_slot(hwif->index);
+ kfree(hwif);
+}
+
+static void ide_disable_port(ide_hwif_t *hwif)
+{
+ struct ide_host *host = hwif->host;
+ int i;
+
+ printk(KERN_INFO "%s: disabling port\n", hwif->name);
+
+ for (i = 0; i < MAX_HOST_PORTS; i++) {
+ if (host->ports[i] == hwif) {
+ host->ports[i] = NULL;
+ host->n_ports--;
+ }
+ }
+
+ ide_port_free(hwif);
+}
+
+int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
+ hw_regs_t **hws)
+{
+ ide_hwif_t *hwif, *mate = NULL;
+ int i, j = 0;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL) {
+ mate = NULL;
+ continue;
+ }
+
+ ide_init_port_hw(hwif, hws[i]);
+ ide_port_apply_params(hwif);
+
+ if (d == NULL) {
+ mate = NULL;
+ } else {
+ if ((i & 1) && mate) {
+ hwif->mate = mate;
+ mate->mate = hwif;
+ }
+
+ mate = (i & 1) ? NULL : hwif;
+
+ ide_init_port(hwif, i & 1, d);
+ ide_port_cable_detect(hwif);
+ }
+
+ ide_port_init_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (ide_probe_port(hwif) == 0)
+ hwif->present = 1;
+
+ if (hwif->chipset != ide_4drives || !hwif->mate ||
+ !hwif->mate->present) {
+ if (ide_register_port(hwif)) {
+ ide_disable_port(hwif);
+ continue;
+ }
+ }
+
+ if (hwif->present)
+ ide_port_tune_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (hwif_init(hwif) == 0) {
+ printk(KERN_INFO "%s: failed to initialize IDE "
+ "interface\n", hwif->name);
+ device_unregister(&hwif->gendev);
+ ide_disable_port(hwif);
+ continue;
+ }
+
+ if (hwif->present)
+ if (ide_port_setup_devices(hwif) == 0) {
+ hwif->present = 0;
+ continue;
+ }
+
+ j++;
+
+ ide_acpi_init(hwif);
+
+ if (hwif->present)
+ ide_acpi_port_init_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (hwif->present)
+ hwif_register_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ ide_sysfs_register_port(hwif);
+ ide_proc_register_port(hwif);
+
+ if (hwif->present)
+ ide_proc_port_register_devices(hwif);
+ }
+
+ return j ? 0 : -1;
+}
+EXPORT_SYMBOL_GPL(ide_host_register);
+
+int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
+ struct ide_host **hostp)
+{
+ struct ide_host *host;
+ int rc;
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL)
+ return -ENOMEM;
+
+ rc = ide_host_register(host, d, hws);
+ if (rc) {
+ ide_host_free(host);
+ return rc;
+ }
+
+ if (hostp)
+ *hostp = host;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_host_add);
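+/*
+ * Illustrative sketch (not taken from a real host driver) of the intended
+ * calling convention; ide_std_init_ports() is assumed to be available from
+ * <linux/ide.h>, and the hws[] array should cover MAX_HOST_PORTS entries:
+ *
+ *	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ *	struct ide_host *host;
+ *
+ *	memset(&hw, 0, sizeof(hw));
+ *	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
+ *	hw.irq = 14;
+ *
+ *	if (ide_host_add(&my_port_info, hws, &host))
+ *		return -ENODEV;
+ *
+ * where my_port_info is a made-up name for the driver's struct
+ * ide_port_info; NULL is also accepted, as ide_host_alloc() and
+ * ide_host_register() above both handle it.
+ */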
+
+static void __ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ device_unregister(&drive->gendev);
+ wait_for_completion(&drive->gendev_rel_comp);
+ }
+ }
+}
+
+void ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+ mutex_lock(&ide_cfg_mtx);
+ __ide_port_unregister_devices(hwif);
+ hwif->present = 0;
+ ide_port_init_devices_data(hwif);
+ mutex_unlock(&ide_cfg_mtx);
+}
+EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
+
+/**
+ * ide_unregister - free an IDE interface
+ * @hwif: IDE interface
+ *
+ * Perform the final unregister of an IDE interface.
+ *
+ * Locking:
+ * The caller must not hold the IDE locks.
+ *
+ * It is up to the caller to be sure there is no pending I/O here,
+ * and that the interface will not be reopened (present/vanishing
+ * locking isn't yet done BTW).
+ */
+
+static void ide_unregister(ide_hwif_t *hwif)
+{
+ BUG_ON(in_interrupt());
+ BUG_ON(irqs_disabled());
+
+ mutex_lock(&ide_cfg_mtx);
+
+ if (hwif->present) {
+ __ide_port_unregister_devices(hwif);
+ hwif->present = 0;
+ }
+
+ ide_proc_unregister_port(hwif);
+
+ free_irq(hwif->irq, hwif);
+
+ device_unregister(hwif->portdev);
+ device_unregister(&hwif->gendev);
+ wait_for_completion(&hwif->gendev_rel_comp);
+
+ /*
+ * Remove us from the kernel's knowledge
+ */
+ blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
+ kfree(hwif->sg_table);
+ unregister_blkdev(hwif->major, hwif->name);
+
+ ide_release_dma_engine(hwif);
+
+ mutex_unlock(&ide_cfg_mtx);
+}
+
+void ide_host_free(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif)
+ ide_port_free(hwif);
+ }
+
+ kfree(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_free);
+
+void ide_host_remove(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif)
+ ide_unregister(hwif);
+ }
+
+ ide_host_free(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_remove);
+
+void ide_port_scan(ide_hwif_t *hwif)
+{
+ ide_port_apply_params(hwif);
+ ide_port_cable_detect(hwif);
+ ide_port_init_devices(hwif);
+
+ if (ide_probe_port(hwif) < 0)
+ return;
+
+ hwif->present = 1;
+
+ ide_port_tune_devices(hwif);
+ ide_port_setup_devices(hwif);
+ ide_acpi_port_init_devices(hwif);
+ hwif_register_devices(hwif);
+ ide_proc_port_register_devices(hwif);
+}
+EXPORT_SYMBOL_GPL(ide_port_scan);
diff --git a/windhoek/ide/ide-proc.c b/windhoek/ide/ide-proc.c
new file mode 100644
index 00000000..a7b9287e
--- /dev/null
+++ b/windhoek/ide/ide-proc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 1997-1998 Mark Lord
+ * Copyright (C) 2003 Red Hat
+ *
+ * Some code was moved here from ide.c, see it for original copyrights.
+ */
+
+/*
+ * This is the /proc/ide/ filesystem implementation.
+ *
+ * Drive/Driver settings can be retrieved by reading the drive's
+ * "settings" files. e.g. "cat /proc/ide0/hda/settings"
+ * To write a new value "val" into a specific setting "name", use:
+ * echo "name:val" >/proc/ide/ide0/hda/settings
+ */
+
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/ide.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+
+static struct proc_dir_entry *proc_ide_root;
+
+static int proc_ide_read_imodel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+ const char *name;
+
+ switch (hwif->chipset) {
+ case ide_generic: name = "generic"; break;
+ case ide_pci: name = "pci"; break;
+ case ide_cmd640: name = "cmd640"; break;
+ case ide_dtc2278: name = "dtc2278"; break;
+ case ide_ali14xx: name = "ali14xx"; break;
+ case ide_qd65xx: name = "qd65xx"; break;
+ case ide_umc8672: name = "umc8672"; break;
+ case ide_ht6560b: name = "ht6560b"; break;
+ case ide_4drives: name = "4drives"; break;
+ case ide_pmac: name = "mac-io"; break;
+ case ide_au1xxx: name = "au1xxx"; break;
+ case ide_palm3710: name = "palm3710"; break;
+ case ide_acorn: name = "acorn"; break;
+ default: name = "(unknown)"; break;
+ }
+ len = sprintf(page, "%s\n", name);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_mate
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+
+ if (hwif && hwif->mate)
+ len = sprintf(page, "%s\n", hwif->mate->name);
+ else
+ len = sprintf(page, "(none)\n");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_channel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+
+ page[0] = hwif->channel ? '1' : '0';
+ page[1] = '\n';
+ len = 2;
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_identify
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *)data;
+ int len = 0, i = 0;
+ int err = 0;
+
+ len = sprintf(page, "\n");
+
+ if (drive) {
+ __le16 *val = (__le16 *)page;
+
+ err = taskfile_lib_get_identify(drive, page);
+ if (!err) {
+ char *out = (char *)page + SECTOR_SIZE;
+
+ page = out;
+ do {
+ out += sprintf(out, "%04x%c",
+ le16_to_cpup(val), (++i & 7) ? ' ' : '\n');
+ val += 1;
+ } while (i < SECTOR_SIZE / 2);
+ len = out - page;
+ }
+ }
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+/**
+ * ide_find_setting - find a specific setting
+ * @st: setting table pointer
+ * @name: setting name
+ *
+ * Scans the setting table for a matching entry and returns it,
+ * or NULL if no entry is found. The caller must hold
+ * ide_setting_mtx.
+ */
+
+static
+const struct ide_proc_devset *ide_find_setting(const struct ide_proc_devset *st,
+ char *name)
+{
+ while (st->name) {
+ if (strcmp(st->name, name) == 0)
+ break;
+ st++;
+ }
+ return st->name ? st : NULL;
+}
+
+/**
+ * ide_read_setting - read an IDE setting
+ * @drive: drive to read from
+ * @setting: drive setting
+ *
+ * Read a drive setting and return the value. The caller
+ * must hold the ide_setting_mtx when making this call.
+ *
+ * BUGS: the data return and the error return share the same value,
+ * so an error of -EINVAL and a genuine setting value of -EINVAL
+ * cannot be told apart.
+ */
+
+static int ide_read_setting(ide_drive_t *drive,
+ const struct ide_proc_devset *setting)
+{
+ const struct ide_devset *ds = setting->setting;
+ int val = -EINVAL;
+
+ if (ds->get)
+ val = ds->get(drive);
+
+ return val;
+}
+
+/**
+ * ide_write_setting - write an IDE setting
+ * @drive: drive to write to
+ * @setting: drive setting
+ * @val: value
+ *
+ * Write a drive setting if it is possible. The caller
+ * must hold the ide_setting_mtx when making this call.
+ *
+ * BUGS: the data return and the error return share the same value,
+ * so an error of -EINVAL and a genuine setting value of -EINVAL
+ * cannot be told apart.
+ *
+ * FIXME: This should be changed to enqueue a special request
+ * to the driver to change settings, and then wait on a sema for completion.
+ * The current scheme of polling is kludgy, though safe enough.
+ */
+
+static int ide_write_setting(ide_drive_t *drive,
+ const struct ide_proc_devset *setting, int val)
+{
+ const struct ide_devset *ds = setting->setting;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!ds->set)
+ return -EPERM;
+ if ((ds->flags & DS_SYNC)
+ && (val < setting->min || val > setting->max))
+ return -EINVAL;
+ return ide_devset_execute(drive, ds, val);
+}
+
+ide_devset_get(xfer_rate, current_speed);
+
+static int set_xfer_rate (ide_drive_t *drive, int arg)
+{
+ ide_task_t task;
+ int err;
+
+ if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
+ return -EINVAL;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.command = ATA_CMD_SET_FEATURES;
+ task.tf.feature = SETFEATURES_XFER;
+ task.tf.nsect = (u8)arg;
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT |
+ IDE_TFLAG_IN_NSECT;
+
+ err = ide_no_data_taskfile(drive, &task);
+
+ if (!err) {
+ ide_set_xfer_rate(drive, (u8) arg);
+ ide_driveid_update(drive);
+ }
+ return err;
+}
+
+ide_devset_rw(current_speed, xfer_rate);
+ide_devset_rw_field(init_speed, init_speed);
+ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
+ide_devset_rw_field(number, dn);
+
+static const struct ide_proc_devset ide_generic_settings[] = {
+ IDE_PROC_DEVSET(current_speed, 0, 70),
+ IDE_PROC_DEVSET(init_speed, 0, 70),
+ IDE_PROC_DEVSET(io_32bit, 0, 1 + (SUPPORT_VLB_SYNC << 1)),
+ IDE_PROC_DEVSET(keepsettings, 0, 1),
+ IDE_PROC_DEVSET(nice1, 0, 1),
+ IDE_PROC_DEVSET(number, 0, 3),
+ IDE_PROC_DEVSET(pio_mode, 0, 255),
+ IDE_PROC_DEVSET(unmaskirq, 0, 1),
+ IDE_PROC_DEVSET(using_dma, 0, 1),
+ { NULL },
+};
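+/*
+ * Added example (illustrative): with the generic table above, writing
+ *
+ *	echo "pio_mode:4" >/proc/ide/ide0/hda/settings
+ *
+ * requests PIO mode 4 for hda, and "unmaskirq:1" sets the unmask flag;
+ * values outside the listed min/max are rejected by ide_write_setting()
+ * above for settings flagged DS_SYNC.
+ */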
+
+static void proc_ide_settings_warn(void)
+{
+ static int warned;
+
+ if (warned)
+ return;
+
+ printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
+ "obsolete, and will be removed soon!\n");
+ warned = 1;
+}
+
+static int proc_ide_read_settings
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ const struct ide_proc_devset *setting, *g, *d;
+ const struct ide_devset *ds;
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *out = page;
+ int len, rc, mul_factor, div_factor;
+
+ proc_ide_settings_warn();
+
+ mutex_lock(&ide_setting_mtx);
+ g = ide_generic_settings;
+ d = drive->settings;
+ out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
+ out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
+ while (g->name || (d && d->name)) {
+ /* read settings in the alphabetical order */
+ if (g->name && d && d->name) {
+ if (strcmp(d->name, g->name) < 0)
+ setting = d++;
+ else
+ setting = g++;
+ } else if (d && d->name) {
+ setting = d++;
+ } else
+ setting = g++;
+ mul_factor = setting->mulf ? setting->mulf(drive) : 1;
+ div_factor = setting->divf ? setting->divf(drive) : 1;
+ out += sprintf(out, "%-24s", setting->name);
+ rc = ide_read_setting(drive, setting);
+ if (rc >= 0)
+ out += sprintf(out, "%-16d", rc * mul_factor / div_factor);
+ else
+ out += sprintf(out, "%-16s", "write-only");
+ out += sprintf(out, "%-16d%-16d", (setting->min * mul_factor + div_factor - 1) / div_factor, setting->max * mul_factor / div_factor);
+ ds = setting->setting;
+ if (ds->get)
+ out += sprintf(out, "r");
+ if (ds->set)
+ out += sprintf(out, "w");
+ out += sprintf(out, "\n");
+ }
+ len = out - page;
+ mutex_unlock(&ide_setting_mtx);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+#define MAX_LEN 30
+
+static int proc_ide_write_settings(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char name[MAX_LEN + 1];
+ int for_real = 0, mul_factor, div_factor;
+ unsigned long n;
+
+ const struct ide_proc_devset *setting;
+ char *buf, *s;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ proc_ide_settings_warn();
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ s = buf = (char *)__get_free_page(GFP_USER);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, buffer, count)) {
+ free_page((unsigned long)buf);
+ return -EFAULT;
+ }
+
+ buf[count] = '\0';
+
+ /*
+ * Skip over leading whitespace
+ */
+ while (count && isspace(*s)) {
+ --count;
+ ++s;
+ }
+ /*
+ * Do one full pass to verify all parameters,
+ * then do another to actually write the new settings.
+ */
+ do {
+ char *p = s;
+ n = count;
+ while (n > 0) {
+ unsigned val;
+ char *q = p;
+
+ while (n > 0 && *p != ':') {
+ --n;
+ p++;
+ }
+ if (*p != ':')
+ goto parse_error;
+ if (p - q > MAX_LEN)
+ goto parse_error;
+ memcpy(name, q, p - q);
+ name[p - q] = 0;
+
+ if (n > 0) {
+ --n;
+ p++;
+ } else
+ goto parse_error;
+
+ val = simple_strtoul(p, &q, 10);
+ n -= q - p;
+ p = q;
+ if (n > 0 && !isspace(*p))
+ goto parse_error;
+ while (n > 0 && isspace(*p)) {
+ --n;
+ ++p;
+ }
+
+ mutex_lock(&ide_setting_mtx);
+ /* generic settings first, then driver specific ones */
+ setting = ide_find_setting(ide_generic_settings, name);
+ if (!setting) {
+ if (drive->settings)
+ setting = ide_find_setting(drive->settings, name);
+ if (!setting) {
+ mutex_unlock(&ide_setting_mtx);
+ goto parse_error;
+ }
+ }
+ if (for_real) {
+ mul_factor = setting->mulf ? setting->mulf(drive) : 1;
+ div_factor = setting->divf ? setting->divf(drive) : 1;
+ ide_write_setting(drive, setting, val * div_factor / mul_factor);
+ }
+ mutex_unlock(&ide_setting_mtx);
+ }
+ } while (!for_real++);
+ free_page((unsigned long)buf);
+ return count;
+parse_error:
+ free_page((unsigned long)buf);
+ printk("proc_ide_write_settings(): parse error\n");
+ return -EINVAL;
+}
+
+int proc_ide_read_capacity
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = sprintf(page, "%llu\n", (long long)0x7fffffff);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+EXPORT_SYMBOL_GPL(proc_ide_read_capacity);
+
+int proc_ide_read_geometry
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "physical %d/%d/%d\n",
+ drive->cyl, drive->head, drive->sect);
+ out += sprintf(out, "logical %d/%d/%d\n",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ len = out - page;
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+EXPORT_SYMBOL(proc_ide_read_geometry);
+
+static int proc_ide_read_dmodel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *m = (char *)&drive->id[ATA_ID_PROD];
+ int len;
+
+ len = sprintf(page, "%.40s\n", m[0] ? m : "(none)");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_driver
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *)data;
+ struct device *dev = &drive->gendev;
+ struct ide_driver *ide_drv;
+ int len;
+
+ if (dev->driver) {
+ ide_drv = to_ide_driver(dev->driver);
+ len = sprintf(page, "%s version %s\n",
+ dev->driver->name, ide_drv->version);
+ } else
+ len = sprintf(page, "ide-default version 0.9.newide\n");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
+{
+ struct device *dev = &drive->gendev;
+ int ret = 1;
+ int err;
+
+ device_release_driver(dev);
+ /* FIXME: device can still be in use by previous driver */
+ strlcpy(drive->driver_req, driver, sizeof(drive->driver_req));
+ err = device_attach(dev);
+ if (err < 0)
+ printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
+ __func__, err);
+ drive->driver_req[0] = 0;
+ if (dev->driver == NULL) {
+ err = device_attach(dev);
+ if (err < 0)
+ printk(KERN_WARNING
+ "IDE: %s: device_attach(2) error: %d\n",
+ __func__, err);
+ }
+ if (dev->driver && !strcmp(dev->driver->name, driver))
+ ret = 0;
+
+ return ret;
+}
+
+static int proc_ide_write_driver
+ (struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char name[32];
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (count > 31)
+ count = 31;
+ if (copy_from_user(name, buffer, count))
+ return -EFAULT;
+ name[count] = '\0';
+ if (ide_replace_subdriver(drive, name))
+ return -EINVAL;
+ return count;
+}
+
+static int proc_ide_read_media
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ const char *media;
+ int len;
+
+ switch (drive->media) {
+ case ide_disk: media = "disk\n"; break;
+ case ide_cdrom: media = "cdrom\n"; break;
+ case ide_tape: media = "tape\n"; break;
+ case ide_floppy: media = "floppy\n"; break;
+ case ide_optical: media = "optical\n"; break;
+ default: media = "UNKNOWN\n"; break;
+ }
+ strcpy(page, media);
+ len = strlen(media);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static ide_proc_entry_t generic_drive_entries[] = {
+ { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver,
+ proc_ide_write_driver },
+ { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
+ { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
+ { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
+ { "settings", S_IFREG|S_IRUSR|S_IWUSR, proc_ide_read_settings,
+ proc_ide_write_settings },
+ { NULL, 0, NULL, NULL }
+};
+
+static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
+{
+ struct proc_dir_entry *ent;
+
+ if (!dir || !p)
+ return;
+ while (p->name != NULL) {
+ ent = create_proc_entry(p->name, p->mode, dir);
+ if (!ent) return;
+ ent->data = data;
+ ent->read_proc = p->read_proc;
+ ent->write_proc = p->write_proc;
+ p++;
+ }
+}
+
+static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
+{
+ if (!dir || !p)
+ return;
+ while (p->name != NULL) {
+ remove_proc_entry(p->name, dir);
+ p++;
+ }
+}
+
+void ide_proc_register_driver(ide_drive_t *drive, struct ide_driver *driver)
+{
+ mutex_lock(&ide_setting_mtx);
+ drive->settings = driver->proc_devsets(drive);
+ mutex_unlock(&ide_setting_mtx);
+
+ ide_add_proc_entries(drive->proc, driver->proc_entries(drive), drive);
+}
+
+EXPORT_SYMBOL(ide_proc_register_driver);
+
+/**
+ * ide_proc_unregister_driver - remove driver specific data
+ * @drive: drive
+ * @driver: driver
+ *
+ * Clean up the driver specific /proc files and IDE settings
+ * for a given drive.
+ *
+ * Takes ide_setting_mtx.
+ */
+
+void ide_proc_unregister_driver(ide_drive_t *drive, struct ide_driver *driver)
+{
+ ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
+
+ mutex_lock(&ide_setting_mtx);
+ /*
+ * ide_setting_mtx protects both the settings list and the use
+ * of settings (we cannot take a setting out that is being used).
+ */
+ drive->settings = NULL;
+ mutex_unlock(&ide_setting_mtx);
+}
+EXPORT_SYMBOL(ide_proc_unregister_driver);
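+/*
+ * Driver-side sketch (illustrative; the "my_" names are made up): a media
+ * driver supplies the two callbacks used above,
+ *
+ *	static struct ide_driver my_driver = {
+ *		...
+ *		.proc_entries	= my_proc_entries,
+ *		.proc_devsets	= my_proc_devsets,
+ *	};
+ *
+ * and calls ide_proc_register_driver(drive, &my_driver) from its probe
+ * path and ide_proc_unregister_driver() on removal.
+ */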
+
+void ide_proc_port_register_devices(ide_hwif_t *hwif)
+{
+ struct proc_dir_entry *ent;
+ struct proc_dir_entry *parent = hwif->proc;
+ ide_drive_t *drive;
+ char name[64];
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0 || drive->proc)
+ continue;
+
+ drive->proc = proc_mkdir(drive->name, parent);
+ if (drive->proc)
+ ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
+ sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
+ ent = proc_symlink(drive->name, proc_ide_root, name);
+ if (!ent) return;
+ }
+}
+
+void ide_proc_unregister_device(ide_drive_t *drive)
+{
+ if (drive->proc) {
+ ide_remove_proc_entries(drive->proc, generic_drive_entries);
+ remove_proc_entry(drive->name, proc_ide_root);
+ remove_proc_entry(drive->name, drive->hwif->proc);
+ drive->proc = NULL;
+ }
+}
+
+static ide_proc_entry_t hwif_entries[] = {
+ { "channel", S_IFREG|S_IRUGO, proc_ide_read_channel, NULL },
+ { "mate", S_IFREG|S_IRUGO, proc_ide_read_mate, NULL },
+ { "model", S_IFREG|S_IRUGO, proc_ide_read_imodel, NULL },
+ { NULL, 0, NULL, NULL }
+};
+
+void ide_proc_register_port(ide_hwif_t *hwif)
+{
+ if (!hwif->proc) {
+ hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
+
+ if (!hwif->proc)
+ return;
+
+ ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
+ }
+}
+
+void ide_proc_unregister_port(ide_hwif_t *hwif)
+{
+ if (hwif->proc) {
+ ide_remove_proc_entries(hwif->proc, hwif_entries);
+ remove_proc_entry(hwif->name, proc_ide_root);
+ hwif->proc = NULL;
+ }
+}
+
+static int proc_print_driver(struct device_driver *drv, void *data)
+{
+ struct ide_driver *ide_drv = to_ide_driver(drv);
+ struct seq_file *s = data;
+
+ seq_printf(s, "%s version %s\n", drv->name, ide_drv->version);
+
+ return 0;
+}
+
+static int ide_drivers_show(struct seq_file *s, void *p)
+{
+ int err;
+
+ err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
+ if (err < 0)
+ printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
+ __func__, err);
+ return 0;
+}
+
+static int ide_drivers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &ide_drivers_show, NULL);
+}
+
+static const struct file_operations ide_drivers_operations = {
+ .owner = THIS_MODULE,
+ .open = ide_drivers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void proc_ide_create(void)
+{
+ proc_ide_root = proc_mkdir("ide", NULL);
+
+ if (!proc_ide_root)
+ return;
+
+ proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
+}
+
+void proc_ide_destroy(void)
+{
+ remove_proc_entry("drivers", proc_ide_root);
+ remove_proc_entry("ide", NULL);
+}
diff --git a/windhoek/ide/ide-sysfs.c b/windhoek/ide/ide-sysfs.c
new file mode 100644
index 00000000..883ffaca
--- /dev/null
+++ b/windhoek/ide/ide-sysfs.c
@@ -0,0 +1,125 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+
+char *ide_media_string(ide_drive_t *drive)
+{
+ switch (drive->media) {
+ case ide_disk:
+ return "disk";
+ case ide_cdrom:
+ return "cdrom";
+ case ide_tape:
+ return "tape";
+ case ide_floppy:
+ return "floppy";
+ case ide_optical:
+ return "optical";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static ssize_t media_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", ide_media_string(drive));
+}
+
+static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", drive->name);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
+}
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
+}
+
+static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
+}
+
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
+}
+
+struct device_attribute ide_dev_attrs[] = {
+ __ATTR_RO(media),
+ __ATTR_RO(drivename),
+ __ATTR_RO(modalias),
+ __ATTR_RO(model),
+ __ATTR_RO(firmware),
+ __ATTR(serial, 0400, serial_show, NULL),
+ __ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
+ __ATTR_NULL
+};
+
+static ssize_t store_delete_devices(struct device *portdev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+ if (strncmp(buf, "1", n))
+ return -EINVAL;
+
+ ide_port_unregister_devices(hwif);
+
+ return n;
+}
+
+static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
+
+static ssize_t store_scan(struct device *portdev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+ if (strncmp(buf, "1", n))
+ return -EINVAL;
+
+ ide_port_unregister_devices(hwif);
+ ide_port_scan(hwif);
+
+ return n;
+}
+
+static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
+
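+/*
+ * Both port attributes are write-only triggers: writing "1" to
+ * "delete_devices" unregisters the devices on the port, while writing
+ * "1" to "scan" unregisters them and then re-probes the port.
+ */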
+static struct device_attribute *ide_port_attrs[] = {
+ &dev_attr_delete_devices,
+ &dev_attr_scan,
+ NULL
+};
+
+int ide_sysfs_register_port(ide_hwif_t *hwif)
+{
+ int i, uninitialized_var(rc);
+
+ for (i = 0; ide_port_attrs[i]; i++) {
+ rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/windhoek/ide/ide-taskfile.c b/windhoek/ide/ide-taskfile.c
new file mode 100644
index 00000000..a499923a
--- /dev/null
+++ b/windhoek/ide/ide-taskfile.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
+ * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2001-2002 Klaus Smolin
+ * IBM Storage Technology Division
+ * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
+ *
+ * The big, the bad and the ugly.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/scatterlist.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+void ide_tf_dump(const char *s, struct ide_taskfile *tf)
+{
+#ifdef DEBUG
+ printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
+ "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
+ s, tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah, tf->device, tf->command);
+ printk("%s: hob: nsect 0x%02x lbal 0x%02x "
+ "lbam 0x%02x lbah 0x%02x\n",
+ s, tf->hob_nsect, tf->hob_lbal,
+ tf->hob_lbam, tf->hob_lbah);
+#endif
+}
+
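+/*
+ * Issue IDENTIFY DEVICE (disks) or IDENTIFY PACKET DEVICE (other media)
+ * via PIO and read the 512-byte identify data into buf.
+ */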
+int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
+{
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ args.tf.nsect = 0x01;
+ if (drive->media == ide_disk)
+ args.tf.command = ATA_CMD_ID_ATA;
+ else
+ args.tf.command = ATA_CMD_ID_ATAPI;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ args.data_phase = TASKFILE_IN;
+ return ide_raw_taskfile(drive, &args, buf, 1);
+}
+
+static ide_startstop_t task_no_data_intr(ide_drive_t *);
+static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
+static ide_startstop_t task_in_intr(ide_drive_t *);
+
+ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_taskfile *tf = &task->tf;
+ ide_handler_t *handler = NULL;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ const struct ide_dma_ops *dma_ops = hwif->dma_ops;
+
+ if (task->data_phase == TASKFILE_MULTI_IN ||
+ task->data_phase == TASKFILE_MULTI_OUT) {
+ if (!drive->mult_count) {
+ printk(KERN_ERR "%s: multimode not set!\n",
+ drive->name);
+ return ide_stopped;
+ }
+ }
+
+ if (task->tf_flags & IDE_TFLAG_FLAGGED)
+ task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
+
+ memcpy(&hwif->task, task, sizeof(*task));
+
+ if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
+ ide_tf_dump(drive->name, tf);
+ tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ tp_ops->tf_load(drive, task);
+ }
+
+ switch (task->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ case TASKFILE_OUT:
+ tp_ops->exec_command(hwif, tf->command);
+ ndelay(400); /* FIXME */
+ return pre_task_out_intr(drive, task->rq);
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_IN:
+ handler = task_in_intr;
+ /* fall-through */
+ case TASKFILE_NO_DATA:
+ if (handler == NULL)
+ handler = task_no_data_intr;
+ ide_execute_command(drive, tf->command, handler,
+ WAIT_WORSTCASE, NULL);
+ return ide_started;
+ default:
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
+ dma_ops->dma_setup(drive))
+ return ide_stopped;
+ dma_ops->dma_exec_cmd(drive, tf->command);
+ dma_ops->dma_start(drive);
+ return ide_started;
+ }
+}
+EXPORT_SYMBOL_GPL(do_rw_taskfile);
+
+/*
+ * Handler for commands without a data phase
+ */
+static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t *task = &hwif->task;
+ struct ide_taskfile *tf = &task->tf;
+ int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
+ int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
+ u8 stat;
+
+ local_irq_enable_in_hardirq();
+
+ while (1) {
+ stat = hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0 || retries-- == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
+ if (custom && tf->command == ATA_CMD_SET_MULTI) {
+ drive->mult_req = drive->mult_count = 0;
+ drive->special.b.recalibrate = 1;
+ (void)ide_dump_status(drive, __func__, stat);
+ return ide_stopped;
+ } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
+ if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
+ ide_set_handler(drive, &task_no_data_intr,
+ WAIT_WORSTCASE, NULL);
+ return ide_started;
+ }
+ }
+ return ide_error(drive, "task_no_data_intr", stat);
+ /* calls ide_end_drive_cmd */
+ }
+
+ if (!custom)
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
+ else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
+ hwif->tp_ops->tf_read(drive, task);
+ if (tf->lbal != 0xc4) {
+ printk(KERN_ERR "%s: head unload failed!\n",
+ drive->name);
+ ide_tf_dump(drive->name, tf);
+ } else
+ drive->dev_flags |= IDE_DFLAG_PARKED;
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
+ } else if (tf->command == ATA_CMD_SET_MULTI)
+ drive->mult_count = drive->mult_req;
+
+ return ide_stopped;
+}
+
+static u8 wait_drive_not_busy(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ int retries;
+ u8 stat;
+
+ /*
+ * The last sector was transferred; wait until the device is ready. This
+ * can take up to 6 ms on some ATAPI devices, so we wait at most 10 ms.
+ */
+ for (retries = 0; retries < 1000; retries++) {
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY)
+ udelay(10);
+ else
+ break;
+ }
+
+ if (stat & ATA_BUSY)
+ printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
+
+ return stat;
+}
+
+static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ DEBUG_MSG("========= ide_pio start =========");
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+ struct scatterlist *cursg = hwif->cursg;
+ struct page *page;
+#ifdef CONFIG_HIGHMEM
+ unsigned long flags;
+#endif
+ unsigned int offset;
+ u8 *buf;
+
+ cursg = hwif->cursg;
+ DEBUG_MSG("sg = %p, cursg %p", sg, cursg);
+ if (!cursg) {
+ cursg = sg;
+ hwif->cursg = sg;
+ }
+
+ page = sg_page(cursg);
+ offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
+ DEBUG_MSG("page %p, offset %lx", page, offset);
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+ DEBUG_MSG("== transform ==> page %p, offset %lx", page, offset);
+ DEBUG_MSG(" virt %p", page_address(page));
+
+
+#ifdef CONFIG_HIGHMEM
+ local_irq_save(flags);
+#endif
+ buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+ DEBUG_MSG("kmap_atomic(%p) = %p", page, buf);
+
+ hwif->nleft--;
+ hwif->cursg_ofs++;
+
+ if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+ hwif->cursg = sg_next(hwif->cursg);
+ hwif->cursg_ofs = 0;
+ }
+
+ /* do the actual data transfer */
+ if (write)
+ hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
+ else
+ hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
+
+ kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+#ifdef CONFIG_HIGHMEM
+ local_irq_restore(flags);
+#endif
+}
+
+static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ unsigned int nsect;
+
+ nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
+ DEBUG_MSG("io on %d sectors (%d, %d)", nsect, drive->hwif->nleft, drive->mult_count);
+ while (nsect--)
+ ide_pio_sector(drive, rq, write);
+}
+
+static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ u8 saved_io_32bit = drive->io_32bit;
+
+ if (rq->bio) /* fs request */
+ rq->errors = 0;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ ide_task_t *task = rq->special;
+
+ if (task->tf_flags & IDE_TFLAG_IO_16BIT)
+ drive->io_32bit = 0;
+ }
+
+ touch_softlockup_watchdog();
+
+ DEBUG_MSG("data_phase : %s", drive->hwif->data_phase == TASKFILE_MULTI_IN ? "TASKFILE_MULTI_IN" :
+ drive->hwif->data_phase == TASKFILE_MULTI_OUT ? "TASKFILE_MULTI_OUT" :
+ "unknown");
+ switch (drive->hwif->data_phase) {
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_MULTI_OUT:
+ ide_pio_multi(drive, rq, write);
+ break;
+ default:
+ ide_pio_sector(drive, rq, write);
+ break;
+ }
+
+ drive->io_32bit = saved_io_32bit;
+}
+
+static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+ const char *s, u8 stat)
+{
+ if (rq->bio) {
+ ide_hwif_t *hwif = drive->hwif;
+ int sectors = hwif->nsect - hwif->nleft;
+
+ switch (hwif->data_phase) {
+ case TASKFILE_IN:
+ if (hwif->nleft)
+ break;
+ /* fall through */
+ case TASKFILE_OUT:
+ sectors--;
+ break;
+ case TASKFILE_MULTI_IN:
+ if (hwif->nleft)
+ break;
+ /* fall through */
+ case TASKFILE_MULTI_OUT:
+ sectors -= drive->mult_count;
+ default:
+ break;
+ }
+
+ if (sectors > 0) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, sectors);
+ }
+ }
+ return ide_error(drive, s, stat);
+}
+
+void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ u8 err = ide_read_error(drive);
+
+ ide_end_drive_cmd(drive, stat, err);
+ return;
+ }
+
+ if (rq->rq_disk) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, rq->nr_sectors);
+ } else
+ ide_end_request(drive, 1, rq->nr_sectors);
+}
+
+/*
+ * We got an interrupt on a task_in case, but no errors and no DRQ.
+ *
+ * It might be a spurious irq (shared irq), but it might be a
+ * command that had no output.
+ */
+static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+ /* Command all done? */
+ if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Assume it was a spurious irq */
+ ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+ return ide_started;
+}
+
+/*
+ * Handler for command with PIO data-in phase (Read/Read Multiple).
+ */
+static ide_startstop_t task_in_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ /* Error? */
+ if (stat & ATA_ERR)
+ return task_error(drive, rq, __func__, stat);
+
+ /* Didn't want any data? Odd. */
+ if ((stat & ATA_DRQ) == 0)
+ return task_in_unexpected(drive, rq, stat);
+
+ ide_pio_datablock(drive, rq, 0);
+
+ /* Are we done? Check status and finish transfer. */
+ if (!hwif->nleft) {
+ stat = wait_drive_not_busy(drive);
+ if (!OK_STAT(stat, 0, BAD_STAT))
+ return task_error(drive, rq, __func__, stat);
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Still data left to transfer. */
+ ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+
+ return ide_started;
+}
+
+/*
+ * Handler for command with PIO data-out phase (Write/Write Multiple).
+ */
+static ide_startstop_t task_out_intr (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
+ return task_error(drive, rq, __func__, stat);
+
+ /* Deal with unexpected ATA data phase. */
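+ /* DRQ should be set if and only if sectors remain to be written;
+  * any mismatch means the device and driver disagree on the phase. */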
+ if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
+ return task_error(drive, rq, __func__, stat);
+
+ if (!hwif->nleft) {
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Still data left to transfer. */
+ ide_pio_datablock(drive, rq, 1);
+ ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
+
+ return ide_started;
+}
+
+static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
+{
+ ide_startstop_t startstop;
+
+ if (ide_wait_stat(&startstop, drive, ATA_DRQ,
+ drive->bad_wstat, WAIT_DRQ)) {
+ printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
+ drive->name, drive->hwif->data_phase ? "MULT" : "",
+ (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
+ return startstop;
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+ local_irq_disable();
+
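+ /* Install the completion handler before pushing the first data block;
+  * the device may raise its interrupt as soon as the block has been
+  * transferred. */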
+ ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
+ ide_pio_datablock(drive, rq, 1);
+
+ return ide_started;
+}
+
+int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
+{
+ struct request *rq;
+ int error;
+
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->buffer = buf;
+
+ /*
+ * (ks) We currently transfer only whole sectors.
+ * This is sufficient for now, but it would be great if we could
+ * find a solution to transfer any size, in order to support
+ * special commands like READ LONG.
+ */
+ rq->hard_nr_sectors = rq->nr_sectors = nsect;
+ rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
+
+ if (task->tf_flags & IDE_TFLAG_WRITE)
+ rq->cmd_flags |= REQ_RW;
+
+ rq->special = task;
+ task->rq = rq;
+
+ error = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ return error;
+}
+
+EXPORT_SYMBOL(ide_raw_taskfile);
+
+int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
+{
+ task->data_phase = TASKFILE_NO_DATA;
+
+ return ide_raw_taskfile(drive, task, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
+
+#ifdef CONFIG_IDE_TASK_IOCTL
+int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+{
+ ide_task_request_t *req_task;
+ ide_task_t args;
+ u8 *outbuf = NULL;
+ u8 *inbuf = NULL;
+ u8 *data_buf = NULL;
+ int err = 0;
+ int tasksize = sizeof(struct ide_task_request_s);
+ unsigned int taskin = 0;
+ unsigned int taskout = 0;
+ u16 nsect = 0;
+ char __user *buf = (char __user *)arg;
+
+// printk("IDE Taskfile ...\n");
+
+ req_task = kzalloc(tasksize, GFP_KERNEL);
+ if (req_task == NULL) return -ENOMEM;
+ if (copy_from_user(req_task, buf, tasksize)) {
+ kfree(req_task);
+ return -EFAULT;
+ }
+
+ taskout = req_task->out_size;
+ taskin = req_task->in_size;
+
+ if (taskin > 65536 || taskout > 65536) {
+ err = -EINVAL;
+ goto abort;
+ }
+
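+ /*
+  * The user buffer is laid out as the ide_task_request_t header,
+  * followed by taskout bytes of write data, followed by taskin bytes
+  * of read data (see the offsets used below).
+  */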
+ if (taskout) {
+ int outtotal = tasksize;
+ outbuf = kzalloc(taskout, GFP_KERNEL);
+ if (outbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(outbuf, buf + outtotal, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ if (taskin) {
+ int intotal = tasksize + taskout;
+ inbuf = kzalloc(taskin, GFP_KERNEL);
+ if (inbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(inbuf, buf + intotal, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ memset(&args, 0, sizeof(ide_task_t));
+
+ memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
+ memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
+
+ args.data_phase = req_task->data_phase;
+
+ args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
+ IDE_TFLAG_IN_TF;
+ if (drive->dev_flags & IDE_DFLAG_LBA48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
+
+ if (req_task->out_flags.all) {
+ args.tf_flags |= IDE_TFLAG_FLAGGED;
+
+ if (req_task->out_flags.b.data)
+ args.tf_flags |= IDE_TFLAG_OUT_DATA;
+
+ if (req_task->out_flags.b.nsector_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
+ if (req_task->out_flags.b.sector_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
+ if (req_task->out_flags.b.lcyl_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
+ if (req_task->out_flags.b.hcyl_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;
+
+ if (req_task->out_flags.b.error_feature)
+ args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
+ if (req_task->out_flags.b.nsector)
+ args.tf_flags |= IDE_TFLAG_OUT_NSECT;
+ if (req_task->out_flags.b.sector)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAL;
+ if (req_task->out_flags.b.lcyl)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAM;
+ if (req_task->out_flags.b.hcyl)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAH;
+ } else {
+ args.tf_flags |= IDE_TFLAG_OUT_TF;
+ if (args.tf_flags & IDE_TFLAG_LBA48)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB;
+ }
+
+ if (req_task->in_flags.b.data)
+ args.tf_flags |= IDE_TFLAG_IN_DATA;
+
+ switch(req_task->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ if (!drive->mult_count) {
+ /* (hs): give up if multcount is not set */
+ printk(KERN_ERR "%s: %s Multimode Write " \
+ "multcount is not set\n",
+ drive->name, __func__);
+ err = -EPERM;
+ goto abort;
+ }
+ /* fall through */
+ case TASKFILE_OUT:
+ /* fall through */
+ case TASKFILE_OUT_DMAQ:
+ case TASKFILE_OUT_DMA:
+ nsect = taskout / SECTOR_SIZE;
+ data_buf = outbuf;
+ break;
+ case TASKFILE_MULTI_IN:
+ if (!drive->mult_count) {
+ /* (hs): give up if multcount is not set */
+ printk(KERN_ERR "%s: %s Multimode Read failure " \
+ "multcount is not set\n",
+ drive->name, __func__);
+ err = -EPERM;
+ goto abort;
+ }
+ /* fall through */
+ case TASKFILE_IN:
+ /* fall through */
+ case TASKFILE_IN_DMAQ:
+ case TASKFILE_IN_DMA:
+ nsect = taskin / SECTOR_SIZE;
+ data_buf = inbuf;
+ break;
+ case TASKFILE_NO_DATA:
+ break;
+ default:
+ err = -EFAULT;
+ goto abort;
+ }
+
+ if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
+ nsect = 0;
+ else if (!nsect) {
+ nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;
+
+ if (!nsect) {
+ printk(KERN_ERR "%s: in/out command without data\n",
+ drive->name);
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
+ args.tf_flags |= IDE_TFLAG_WRITE;
+
+ err = ide_raw_taskfile(drive, &args, data_buf, nsect);
+
+ memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
+ memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
+
+ if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
+ req_task->in_flags.all == 0) {
+ req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+ if (drive->dev_flags & IDE_DFLAG_LBA48)
+ req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
+ }
+
+ if (copy_to_user(buf, req_task, tasksize)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ if (taskout) {
+ int outtotal = tasksize;
+ if (copy_to_user(buf + outtotal, outbuf, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+ if (taskin) {
+ int intotal = tasksize + taskout;
+ if (copy_to_user(buf + intotal, inbuf, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+abort:
+ kfree(req_task);
+ kfree(outbuf);
+ kfree(inbuf);
+
+// printk("IDE Taskfile ioctl ended. rc = %i\n", err);
+
+ return err;
+}
+#endif
diff --git a/windhoek/ide/ide.c b/windhoek/ide/ide.c
new file mode 100644
index 00000000..15eb62ed
--- /dev/null
+++ b/windhoek/ide/ide.c
@@ -0,0 +1,554 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * See linux/MAINTAINERS for address of current maintainer.
+ *
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to MAX_HWIFS IDE interfaces, on one or more IRQs
+ * (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * ...
+ *
+ * From hd.c:
+ * |
+ * | It traverses the request-list, using interrupts to jump between functions.
+ * | As nearly all functions can be called within interrupts, we may not sleep.
+ * | Special care is recommended. Have Fun!
+ * |
+ * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
+ * |
+ * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * | in the early extended-partition checks and added DM partitions.
+ * |
+ * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
+ * |
+ * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
+ * | and general streamlining by Mark Lord (mlord@pobox.com).
+ *
+ * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
+ *
+ * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
+ * Delman Lee (delman@ieee.org) ("Mr. atdisk2")
+ * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
+ *
+ * This was a rewrite of just about everything from hd.c, though some original
+ * code is still sprinkled about. Think of it as a major evolution, with
+ * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+
+struct class *ide_port_class;
+
+/*
+ * Locks for IDE setting functionality
+ */
+
+DEFINE_MUTEX(ide_setting_mtx);
+
+ide_devset_get(io_32bit, io_32bit);
+
+static int set_io_32bit(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
+ return -EPERM;
+
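+ /* 0 and 1 are always valid; 2 and 3 are additionally allowed when
+  * SUPPORT_VLB_SYNC is built in. */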
+ if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
+ return -EINVAL;
+
+ drive->io_32bit = arg;
+
+ return 0;
+}
+
+ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
+
+static int set_ksettings(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
+
+ return 0;
+}
+
+ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
+
+static int set_using_dma(ide_drive_t *drive, int arg)
+{
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ int err = -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ata_id_has_dma(drive->id) == 0)
+ goto out;
+
+ if (drive->hwif->dma_ops == NULL)
+ goto out;
+
+ err = 0;
+
+ if (arg) {
+ if (ide_set_dma(drive))
+ err = -EIO;
+ } else
+ ide_dma_off(drive);
+
+out:
+ return err;
+#else
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ return -EPERM;
+#endif
+}
+
+/*
+ * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
+ */
+static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
+{
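+ /*
+  * These magic values are honoured only when the host driver sets the
+  * matching IDE_HFLAG_ABUSE_* flag: 100..202 are abused to select DMA
+  * modes, 8/9 for prefetch control, 6/7 for fast DEVSEL control.
+  */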
+ switch (req_pio) {
+ case 202:
+ case 201:
+ case 200:
+ case 102:
+ case 101:
+ case 100:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
+ case 9:
+ case 8:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
+ case 7:
+ case 6:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int set_pio_mode(ide_drive_t *drive, int arg)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (arg < 0 || arg > 255)
+ return -EINVAL;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -ENOSYS;
+
+ if (set_pio_mode_abuse(drive->hwif, arg)) {
+ if (arg == 8 || arg == 9) {
+ unsigned long flags;
+
+ /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
+ spin_lock_irqsave(&hwif->lock, flags);
+ port_ops->set_pio_mode(drive, arg);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ } else
+ port_ops->set_pio_mode(drive, arg);
+ } else {
+ int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
+
+ ide_set_pio(drive, arg);
+
+ if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
+ if (keep_dma)
+ ide_dma_on(drive);
+ }
+ }
+
+ return 0;
+}
+
+ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
+
+static int set_unmaskirq(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
+ return -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_UNMASK;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+
+ return 0;
+}
+
+ide_ext_devset_rw_sync(io_32bit, io_32bit);
+ide_ext_devset_rw_sync(keepsettings, ksettings);
+ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
+ide_ext_devset_rw_sync(using_dma, using_dma);
+__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
+
+/**
+ * ide_device_get - get an additional reference to a ide_drive_t
+ * @drive: device to get a reference to
+ *
+ * Gets a reference to the ide_drive_t and increments the use count of the
+ * underlying LLDD module.
+ */
+int ide_device_get(ide_drive_t *drive)
+{
+ struct device *host_dev;
+ struct module *module;
+
+ if (!get_device(&drive->gendev))
+ return -ENXIO;
+
+ host_dev = drive->hwif->host->dev[0];
+ module = host_dev ? host_dev->driver->owner : NULL;
+
+ if (module && !try_module_get(module)) {
+ put_device(&drive->gendev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_device_get);
+
+/**
+ * ide_device_put - release a reference to a ide_drive_t
+ * @drive: device to release a reference on
+ *
+ * Release a reference to the ide_drive_t and decrements the use count of
+ * the underlying LLDD module.
+ */
+void ide_device_put(ide_drive_t *drive)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+ struct device *host_dev = drive->hwif->host->dev[0];
+ struct module *module = host_dev ? host_dev->driver->owner : NULL;
+
+ if (module)
+ module_put(module);
+#endif
+ put_device(&drive->gendev);
+}
+EXPORT_SYMBOL_GPL(ide_device_put);
+
+static int ide_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static int ide_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+
+ add_uevent_var(env, "MEDIA=%s", ide_media_string(drive));
+ add_uevent_var(env, "DRIVENAME=%s", drive->name);
+ add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive));
+ return 0;
+}
+
+static int generic_ide_probe(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ return drv->probe ? drv->probe(drive) : -ENODEV;
+}
+
+static int generic_ide_remove(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (drv->remove)
+ drv->remove(drive);
+
+ return 0;
+}
+
+static void generic_ide_shutdown(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(drive);
+}
+
+struct bus_type ide_bus_type = {
+ .name = "ide",
+ .match = ide_bus_match,
+ .uevent = ide_uevent,
+ .probe = generic_ide_probe,
+ .remove = generic_ide_remove,
+ .shutdown = generic_ide_shutdown,
+ .dev_attrs = ide_dev_attrs,
+#ifndef DDE_LINUX
+ .suspend = generic_ide_suspend,
+ .resume = generic_ide_resume,
+#endif
+};
+
+EXPORT_SYMBOL_GPL(ide_bus_type);
+
+int ide_vlb_clk;
+EXPORT_SYMBOL_GPL(ide_vlb_clk);
+
+module_param_named(vlb_clock, ide_vlb_clk, int, 0);
+MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
+
+int ide_pci_clk;
+EXPORT_SYMBOL_GPL(ide_pci_clk);
+
+module_param_named(pci_clock, ide_pci_clk, int, 0);
+MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
+
+static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+{
+ int a, b, i, j = 1;
+ unsigned int *dev_param_mask = (unsigned int *)kp->arg;
+
+ /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
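+ /* e.g. "0.1:1" sets the flag for controller 0, device 1 (the slave) */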
+ if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
+ sscanf(s, "%d.%d", &a, &b) != 2)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ *dev_param_mask |= (1 << i);
+ else
+ *dev_param_mask &= ~(1 << i);
+
+ return 0;
+}
+
+static unsigned int ide_nodma;
+
+module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+MODULE_PARM_DESC(nodma, "disallow DMA for a device");
+
+static unsigned int ide_noflush;
+
+module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+
+static unsigned int ide_noprobe;
+
+module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+MODULE_PARM_DESC(noprobe, "skip probing for a device");
+
+static unsigned int ide_nowerr;
+
+module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
+
+static unsigned int ide_cdroms;
+
+module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
+
+struct chs_geom {
+ unsigned int cyl;
+ u8 head;
+ u8 sect;
+};
+
+static unsigned int ide_disks;
+static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+
+static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+{
+ int a, b, c = 0, h = 0, s = 0, i, j = 1;
+
+ /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
+ /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
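+ /* e.g. "0.0:1023,16,63" forces controller 0, device 0 to 1023/16/63 */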
+ if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
+ sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (c > INT_MAX || h > 255 || s > 255)
+ return -EINVAL;
+
+ if (j)
+ ide_disks |= (1 << i);
+ else
+ ide_disks &= ~(1 << i);
+
+ ide_disks_chs[i].cyl = c;
+ ide_disks_chs[i].head = h;
+ ide_disks_chs[i].sect = s;
+
+ return 0;
+}
+
+module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
+MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
+
+static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
+{
+ int i = drive->hwif->index * MAX_DRIVES + unit;
+
+ if (ide_nodma & (1 << i)) {
+ printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_NODMA;
+ }
+ if (ide_noflush & (1 << i)) {
+ printk(KERN_INFO "ide: disabling flush requests for %s\n",
+ drive->name);
+ drive->dev_flags |= IDE_DFLAG_NOFLUSH;
+ }
+ if (ide_noprobe & (1 << i)) {
+ printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_NOPROBE;
+ }
+ if (ide_nowerr & (1 << i)) {
+ printk(KERN_INFO "ide: ignoring the ATA_DF bit for %s\n",
+ drive->name);
+ drive->bad_wstat = BAD_R_STAT;
+ }
+ if (ide_cdroms & (1 << i)) {
+ printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_PRESENT;
+ drive->media = ide_cdrom;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ }
+ if (ide_disks & (1 << i)) {
+ drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
+ drive->head = drive->bios_head = ide_disks_chs[i].head;
+ drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
+
+ printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
+ drive->name,
+ drive->cyl, drive->head, drive->sect);
+
+ drive->dev_flags |= IDE_DFLAG_FORCED_GEOM | IDE_DFLAG_PRESENT;
+ drive->media = ide_disk;
+ drive->ready_stat = ATA_DRDY;
+ }
+}
+
+static unsigned int ide_ignore_cable;
+
+static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+{
+ int i, j = 1;
+
+ /* controller (ignore) */
+ /* controller : 1 (ignore) | 0 (use) */
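+ /* e.g. "1" or "1:1" ignores cable detection on controller 1 */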
+ if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
+ return -EINVAL;
+
+ if (i >= MAX_HWIFS || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ ide_ignore_cable |= (1 << i);
+ else
+ ide_ignore_cable &= ~(1 << i);
+
+ return 0;
+}
+
+module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
+MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
+
+void ide_port_apply_params(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ if (ide_ignore_cable & (1 << hwif->index)) {
+ printk(KERN_INFO "ide: ignoring cable detection for %s\n",
+ hwif->name);
+ hwif->cbl = ATA_CBL_PATA40_SHORT;
+ }
+
+ ide_port_for_each_dev(i, drive, hwif)
+ ide_dev_apply_params(drive, i);
+}
+
+/*
+ * This gets invoked once during initialization, to set *everything* up
+ */
+static int __init ide_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
+
+ ret = bus_register(&ide_bus_type);
+ if (ret < 0) {
+ printk(KERN_WARNING "IDE: bus_register error: %d\n", ret);
+ return ret;
+ }
+
+ ide_port_class = class_create(THIS_MODULE, "ide_port");
+ if (IS_ERR(ide_port_class)) {
+ ret = PTR_ERR(ide_port_class);
+ goto out_port_class;
+ }
+
+ proc_ide_create();
+
+ return 0;
+
+out_port_class:
+ bus_unregister(&ide_bus_type);
+
+ return ret;
+}
+
+static void __exit ide_exit(void)
+{
+ proc_ide_destroy();
+
+ class_destroy(ide_port_class);
+
+ bus_unregister(&ide_bus_type);
+}
+
+module_init(ide_init);
+module_exit(ide_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/local.h b/windhoek/ide/local.h
new file mode 100644
index 00000000..c2d0952a
--- /dev/null
+++ b/windhoek/ide/local.h
@@ -0,0 +1,11 @@
+#ifndef LOCAL_H
+#define LOCAL_H
+
+/* Debug output is compiled out; the commented-out printk below is the
+ * verbose variant of the macro body. */
+#define DEBUG_MSG(msg, ...) {}
+// printk("%s: \033[31m"msg"\033[0m\n", __FUNCTION__, ##__VA_ARGS__)
+
+#define CONFIG_IDE_GD 1
+#define CONFIG_IDE_GD_ATA 1
+
+#endif
diff --git a/windhoek/ide/piix.c b/windhoek/ide/piix.c
new file mode 100644
index 00000000..f1e2e4ef
--- /dev/null
+++ b/windhoek/ide/piix.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Documentation:
+ *
+ * Publicly available from the Intel web site. Errata documentation
+ * is also publicly available. As an aid to anyone hacking on this
+ * driver, the list of relevant errata, going back to the PIIX4, is
+ * below. Older device documentation is now a bit tricky to find.
+ *
+ * Errata of note:
+ *
+ * Unfixable
+ * PIIX4 errata #9 - Only on ultra obscure hw
+ * ICH3 errata #13 - Not observed to affect real hw
+ * by Intel
+ *
+ * Things we must deal with
+ * PIIX4 errata #10 - BM IDE hang with non UDMA
+ * (must stop/start dma to recover)
+ * 440MX errata #15 - As PIIX4 errata #10
+ * PIIX4 errata #15 - Must not read control registers
+ * during a PIO transfer
+ * 440MX errata #13 - As PIIX4 errata #15
+ * ICH2 errata #21 - DMA mode 0 doesn't work right
+ * ICH0/1 errata #55 - As ICH2 errata #21
+ * ICH2 spec c #9 - Extra operations needed to handle
+ * drive hotswap [NOT YET SUPPORTED]
+ * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
+ * and must be dword aligned
+ * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
+ *
+ * Should have been fixed by the BIOS:
+ * 450NX: errata #19 - DMA hangs on old 450NX
+ * 450NX: errata #20 - DMA hangs on old 450NX
+ * 450NX: errata #25 - Corruption with DMA on old 450NX
+ * ICH3 errata #15 - IDE deadlock under high load
+ * (BIOS must set dev 31 fn 0 bit 23)
+ * ICH3 errata #18 - Don't use native mode
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#define DRV_NAME "piix"
+
+static int no_piix_dma;
+
+/**
+ * piix_set_pio_mode - set host controller for PIO mode
+ * @drive: drive
+ * @pio: PIO mode number
+ *
+ * Set the interface PIO mode based upon the settings done by the AMI BIOS.
+ */
+
+static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ int is_slave = drive->dn & 1;
+ int master_port = hwif->channel ? 0x42 : 0x40;
+ int slave_port = 0x44;
+ unsigned long flags;
+ u16 master_data;
+ u8 slave_data;
+ static DEFINE_SPINLOCK(tune_lock);
+ int control = 0;
+
+ /* ISP RTC */
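+ /* (ISP = IORDY sample point, RTC = recovery time, in clocks) */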
+ static const u8 timings[][2]= {
+ { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ /*
+ * Master vs slave is synchronized above us but the slave register is
+ * shared by the two hwifs so the corner case of two slave timeouts in
+ * parallel must be locked.
+ */
+ spin_lock_irqsave(&tune_lock, flags);
+ pci_read_config_word(dev, master_port, &master_data);
+
+ if (pio > 1)
+ control |= 1; /* Programmable timing on */
+ if (drive->media == ide_disk)
+ control |= 4; /* Prefetch, post write */
+ if (pio > 2)
+ control |= 2; /* IORDY */
+ if (is_slave) {
+ master_data |= 0x4000;
+ master_data &= ~0x0070;
+ if (pio > 1) {
+ /* Set PPE, IE and TIME */
+ master_data |= control << 4;
+ }
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= hwif->channel ? 0x0f : 0xf0;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
+ (hwif->channel ? 4 : 0);
+ } else {
+ master_data &= ~0x3307;
+ if (pio > 1) {
+ /* enable PPE, IE and TIME */
+ master_data |= control;
+ }
+ master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+ spin_unlock_irqrestore(&tune_lock, flags);
+}
+
+/**
+ * piix_set_dma_mode - set host controller for DMA mode
+ * @drive: drive
+ * @speed: DMA mode
+ *
+ * Set a PIIX host controller to the desired DMA mode. This involves
+ * programming the right timing data into the PCI configuration space.
+ */
+
+static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ u8 maslave = hwif->channel ? 0x42 : 0x40;
+ int a_speed = 3 << (drive->dn * 4);
+ int u_flag = 1 << drive->dn;
+ int v_flag = 0x01 << drive->dn;
+ int w_flag = 0x10 << drive->dn;
+ int u_speed = 0;
+ int sitre;
+ u16 reg4042, reg4a;
+ u8 reg48, reg54, reg55;
+
+ pci_read_config_word(dev, maslave, &reg4042);
+ sitre = (reg4042 & 0x4000) ? 1 : 0;
+ pci_read_config_byte(dev, 0x48, &reg48);
+ pci_read_config_word(dev, 0x4a, &reg4a);
+ pci_read_config_byte(dev, 0x54, &reg54);
+ pci_read_config_byte(dev, 0x55, &reg55);
+
+ if (speed >= XFER_UDMA_0) {
+ u8 udma = speed - XFER_UDMA_0;
+
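+ /* The 2-bit cycle-time field in config reg 0x4a is interpreted
+  * relative to the base clock: regs 0x54 and 0x55 enable the faster
+  * clocks needed for UDMA3+ and UDMA5 respectively, hence the folding
+  * of the mode number below. */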
+ u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
+
+ if (!(reg48 & u_flag))
+ pci_write_config_byte(dev, 0x48, reg48 | u_flag);
+ if (speed == XFER_UDMA_5) {
+ pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
+ } else {
+ pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+ }
+ if ((reg4a & a_speed) != u_speed)
+ pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
+ if (speed > XFER_UDMA_2) {
+ if (!(reg54 & v_flag))
+ pci_write_config_byte(dev, 0x54, reg54 | v_flag);
+ } else
+ pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
+ } else {
+ const u8 mwdma_to_pio[] = { 0, 3, 4 };
+ u8 pio;
+
+ if (reg48 & u_flag)
+ pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg54 & v_flag)
+ pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
+ if (reg55 & w_flag)
+ pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+
+ if (speed >= XFER_MW_DMA_0)
+ pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ else
+ pio = 2; /* only SWDMA2 is allowed */
+
+ piix_set_pio_mode(drive, pio);
+ }
+}
+
+/**
+ * init_chipset_ich - set up the ICH chipset
+ * @dev: PCI device to set up
+ *
+ * Initialize the PCI device as required. For the ICH this turns
+ * out to be nice and simple.
+ */
+
+static unsigned int init_chipset_ich(struct pci_dev *dev)
+{
+ u32 extra = 0;
+
+ pci_read_config_dword(dev, 0x54, &extra);
+ pci_write_config_dword(dev, 0x54, extra | 0x400);
+
+ return 0;
+}
+
+/**
+ * ich_clear_irq - clear BMDMA status
+ * @drive: IDE drive
+ *
+ * ICHx controllers set DMA INTR no matter whether the transfer was
+ * DMA or PIO, so the BMDMA status may need to be cleared even for
+ * PIO interrupts to prevent spurious/lost IRQs.
+ */
+static void ich_clear_irq(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 dma_stat;
+
+ /*
+ * ide_dma_end() needs BMDMA status for error checking.
+ * So, skip clearing BMDMA status here and leave it
+ * to ide_dma_end() if this is DMA interrupt.
+ */
+ if (drive->waiting_for_dma || hwif->dma_base == 0)
+ return;
+
+ /* clear the INTR & ERROR bits */
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
+ /* Should we force the bit as well ? */
+ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
+}
+
+struct ich_laptop {
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+};
+
+/*
+ * List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+ /* devid, subvendor, subdev */
+ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
+ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
+ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
+ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
+ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
+ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
+ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
+ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
+ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
+ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ /* end marker */
+ { 0, }
+};
+
+static u8 piix_cable_detect(ide_hwif_t *hwif)
+{
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+ const struct ich_laptop *lap = &ich_laptop[0];
+ u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
+
+ /* check for specials */
+ while (lap->device) {
+ if (lap->device == pdev->device &&
+ lap->subvendor == pdev->subsystem_vendor &&
+ lap->subdevice == pdev->subsystem_device) {
+ return ATA_CBL_PATA40_SHORT;
+ }
+ lap++;
+ }
+
+ pci_read_config_byte(pdev, 0x54, &reg54h);
+
+ return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+}
+
+/**
+ * init_hwif_piix - fill in the hwif for the PIIX
+ * @hwif: IDE interface
+ *
+ * Set up the ide_hwif_t for the PIIX interface according to the
+ * capabilities of the hardware.
+ */
+
+static void __devinit init_hwif_piix(ide_hwif_t *hwif)
+{
+ if (!hwif->dma_base)
+ return;
+
+ if (no_piix_dma)
+ hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
+}
+
+static const struct ide_port_ops piix_port_ops = {
+ .set_pio_mode = piix_set_pio_mode,
+ .set_dma_mode = piix_set_dma_mode,
+ .cable_detect = piix_cable_detect,
+};
+
+static const struct ide_port_ops ich_port_ops = {
+ .set_pio_mode = piix_set_pio_mode,
+ .set_dma_mode = piix_set_dma_mode,
+ .clear_irq = ich_clear_irq,
+ .cable_detect = piix_cable_detect,
+};
+
+#ifndef CONFIG_IA64
+ #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
+#else
+ #define IDE_HFLAGS_PIIX 0
+#endif
+
+#define DECLARE_PIIX_DEV(udma) \
+ { \
+ .name = DRV_NAME, \
+ .init_hwif = init_hwif_piix, \
+ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &piix_port_ops, \
+ .host_flags = IDE_HFLAGS_PIIX, \
+ .pio_mask = ATA_PIO4, \
+ .swdma_mask = ATA_SWDMA2_ONLY, \
+ .mwdma_mask = ATA_MWDMA12_ONLY, \
+ .udma_mask = udma, \
+ }
+
+#define DECLARE_ICH_DEV(udma) \
+ { \
+ .name = DRV_NAME, \
+ .init_chipset = init_chipset_ich, \
+ .init_hwif = init_hwif_piix, \
+ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &ich_port_ops, \
+ .host_flags = IDE_HFLAGS_PIIX, \
+ .pio_mask = ATA_PIO4, \
+ .swdma_mask = ATA_SWDMA2_ONLY, \
+ .mwdma_mask = ATA_MWDMA12_ONLY, \
+ .udma_mask = udma, \
+ }
+
+static const struct ide_port_info piix_pci_info[] __devinitdata = {
+ /* 0: MPIIX */
+ { /*
+ * MPIIX actually has only a single IDE channel mapped to
+ * the primary or secondary ports depending on the value
+ * of the bit 14 of the IDETIM register at offset 0x6c
+ */
+ .name = DRV_NAME,
+ .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
+ .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA |
+ IDE_HFLAGS_PIIX,
+ .pio_mask = ATA_PIO4,
+ /* This is a painful system; best to let it self-tune for now */
+ },
+ /* 1: PIIXa/PIIXb/PIIX3 */
+ DECLARE_PIIX_DEV(0x00), /* no udma */
+ /* 2: PIIX4 */
+ DECLARE_PIIX_DEV(ATA_UDMA2),
+ /* 3: ICH0 */
+ DECLARE_ICH_DEV(ATA_UDMA2),
+ /* 4: ICH */
+ DECLARE_ICH_DEV(ATA_UDMA4),
+ /* 5: PIIX4 */
+ DECLARE_PIIX_DEV(ATA_UDMA4),
+ /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
+ DECLARE_ICH_DEV(ATA_UDMA5),
+};
+
+/**
+ * piix_init_one - called when a PIIX is found
+ * @dev: the piix device
+ * @id: the matching pci id
+ *
+ * Called when the PCI registration layer (or the IDE initialization)
+ * finds a device matching our IDE device tables.
+ */
+
+static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
+}
+
+/**
+ * piix_check_450nx - Check for problem 450NX setup
+ *
+ * Check for the presence of 450NX errata #19 and errata #25. If
+ * they are found, disable the use of IDE DMA.
+ */
+
+static void __devinit piix_check_450nx(void)
+{
+ struct pci_dev *pdev = NULL;
+ u16 cfg;
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+        PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) {
+ /* Look for 450NX PXB. Check for problem configurations
+    A PCI quirk checks bit 6 already */
+ pci_read_config_word(pdev, 0x41, &cfg);
+ /* Only on the original revision: IDE DMA can hang */
+ if (pdev->revision == 0x00)
+ no_piix_dma = 1;
+ /* On all revisions below 5 PXB bus lock must be disabled for IDE */
+ else if (cfg & (1 << 14) && pdev->revision < 5)
+ no_piix_dma = 2;
+ }
+ if (no_piix_dma)
+ printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
+ if (no_piix_dma == 2)
+ printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
+}
+
+static const struct pci_device_id piix_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
+#ifdef CONFIG_BLK_DEV_IDE_SATA
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
+#endif
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
+
+static struct pci_driver piix_pci_driver = {
+ .name = "PIIX_IDE",
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = ide_pci_remove,
+ .suspend = ide_pci_suspend,
+ .resume = ide_pci_resume,
+};
+
+static int __init piix_ide_init(void)
+{
+ piix_check_450nx();
+ return ide_pci_register_driver(&piix_pci_driver);
+}
+
+static void __exit piix_ide_exit(void)
+{
+ pci_unregister_driver(&piix_pci_driver);
+}
+
+module_init(piix_ide_init);
+module_exit(piix_ide_exit);
+
+MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
+MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/setup-pci.c b/windhoek/ide/setup-pci.c
new file mode 100644
index 00000000..e85d1ed2
--- /dev/null
+++ b/windhoek/ide/setup-pci.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 1995-1998 Mark Lord
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ide.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+/**
+ * ide_setup_pci_baseregs - place a PCI IDE controller native
+ * @dev: PCI device of interface to switch native
+ * @name: Name of interface
+ *
+ * We attempt to place the PCI interface into PCI native mode. If
+ * we succeed the BARs are ok and the controller is in PCI mode.
+ * Returns 0 on success or an errno code.
+ *
+ * FIXME: if we program the interface and then fail to set the BARs
+ * we don't switch it back to legacy mode. Do we actually care?
+ */
+
+static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
+{
+ u8 progif = 0;
+
+ /*
+ * Place both IDE interfaces into PCI "native" mode:
+ */
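+ /* In the programming-interface byte, bits 0 and 2 select native mode
+  * for the primary/secondary channels; bits 1 and 3 indicate whether
+  * those mode bits are programmable at all. */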
+ if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+ (progif & 5) != 5) {
+ if ((progif & 0xa) != 0xa) {
+ printk(KERN_INFO "%s %s: device not capable of full "
+ "native PCI mode\n", name, pci_name(dev));
+ return -EOPNOTSUPP;
+ }
+ printk(KERN_INFO "%s %s: placing both ports into native PCI "
+ "mode\n", name, pci_name(dev));
+ (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
+ if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+ (progif & 5) != 5) {
+ printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
+ "wanted 0x%04x, got 0x%04x\n",
+ name, pci_name(dev), progif | 5, progif);
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
+{
+ u8 dma_stat = inb(dma_base + 2);
+
+ outb(dma_stat & 0x60, dma_base + 2);
+ dma_stat = inb(dma_base + 2);
+
+ return (dma_stat & 0x80) ? 1 : 0;
+}
+
+/**
+ * ide_pci_dma_base - setup BMIBA
+ * @hwif: IDE interface
+ * @d: IDE port info
+ *
+ * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
+ */
+
+unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ unsigned long dma_base = 0;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return hwif->dma_base;
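+ /* The BMDMA register block is 16 bytes per controller; the primary
+  * channel uses base..base+7, the secondary channel base+8..base+15. */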
+
+ if (hwif->mate && hwif->mate->dma_base) {
+ dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
+ } else {
+ u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;
+
+ dma_base = pci_resource_start(dev, baridx);
+
+ if (dma_base == 0) {
+ printk(KERN_ERR "%s %s: DMA base is invalid\n",
+ d->name, pci_name(dev));
+ return 0;
+ }
+ }
+
+ if (hwif->channel)
+ dma_base += 8;
+
+ return dma_base;
+}
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ u8 dma_stat;
+
+ if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
+ goto out;
+
+ if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
+ if (ide_pci_clear_simplex(hwif->dma_base, d->name))
+ printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+
+ /*
+ * If the device claims "simplex" DMA, this means that only one of
+ * the two interfaces can be trusted with DMA at any point in time
+ * (so we should enable DMA only on one of the two interfaces).
+ *
+ * FIXME: At this point we haven't probed the drives so we can't make
+ * the appropriate decision. Really we should defer this problem until
+ * we tune the drive then try to grab DMA ownership if we want to be
+ * the DMA end. This has to become dynamic in order to handle hot-plug.
+ */
+ dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
+ if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
+ printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
+ d->name, pci_name(dev));
+ return -1;
+ }
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
+
+/*
+ * Set up BM-DMA capability (PnP BIOS should have done this)
+ */
+int ide_pci_set_master(struct pci_dev *dev, const char *name)
+{
+ u16 pcicmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+
+ if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
+ pci_set_master(dev);
+
+ if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
+ (pcicmd & PCI_COMMAND_MASTER) == 0) {
+ printk(KERN_ERR "%s %s: error updating PCICMD\n",
+ name, pci_name(dev));
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_set_master);
+#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+
+void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
+ d->name, pci_name(dev),
+ dev->vendor, dev->device, dev->revision);
+}
+EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
+
+
+/**
+ * ide_pci_enable - do PCI enables
+ * @dev: PCI device
+ * @d: IDE port info
+ *
+ * Enable the IDE PCI device. We attempt to enable the device in full
+ * but if that fails then we only need IO space. The PCI code should
+ * have set up the proper resources for us already for controllers in
+ * legacy mode.
+ *
+ * Returns zero on success or an error code
+ */
+
+static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ int ret, bars;
+
+ if (pci_enable_device(dev)) {
+ ret = pci_enable_device_io(dev);
+ if (ret < 0) {
+ printk(KERN_WARNING "%s %s: couldn't enable device\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+ printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
+ d->name, pci_name(dev));
+ }
+
+ /*
+ * assume all devices can do 32-bit DMA for now, we can add
+ * a DMA mask field to the struct ide_port_info if we need it
+ * (or let lower level driver set the DMA mask)
+ */
+ ret = pci_set_dma_mask(dev, DMA_32BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR "%s %s: can't set DMA mask\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
+ ret = pci_request_selected_regions(dev, bars, d->name);
+ if (ret < 0)
+ printk(KERN_ERR "%s %s: can't reserve resources\n",
+ d->name, pci_name(dev));
+out:
+ return ret;
+}
+
+/**
+ * ide_pci_configure - configure an unconfigured device
+ * @dev: PCI device
+ * @d: IDE port info
+ *
+ * Enable and configure the PCI device we have been passed.
+ * Returns zero on success or an error code.
+ */
+
+static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ u16 pcicmd = 0;
+ /*
+ * PnP BIOS was *supposed* to have set up this device, but we
+ * can do it ourselves, so long as the BIOS has assigned an IRQ
+ * (or possibly the device is using a "legacy header" for IRQs).
+ * Maybe the user deliberately *disabled* the device,
+ * but we'll eventually ignore it again if no drives respond.
+ */
+ if (ide_setup_pci_baseregs(dev, d->name) ||
+ pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
+ printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
+ d->name, pci_name(dev));
+ return -ENODEV;
+ }
+ if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
+ printk(KERN_ERR "%s %s: error accessing PCI regs\n",
+ d->name, pci_name(dev));
+ return -EIO;
+ }
+ if (!(pcicmd & PCI_COMMAND_IO)) {
+ printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
+ d->name, pci_name(dev));
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * ide_pci_check_iomem - check a register is I/O
+ * @dev: PCI device
+ * @d: IDE port info
+ * @bar: BAR number
+ *
+ * Checks if a BAR is configured and points to MMIO space. If so,
+ * return an error code. Otherwise return 0
+ */
+
+static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
+ int bar)
+{
+ ulong flags = pci_resource_flags(dev, bar);
+
+ /* Unconfigured ? */
+ if (!flags || pci_resource_len(dev, bar) == 0)
+ return 0;
+
+ /* I/O space */
+ if (flags & IORESOURCE_IO)
+ return 0;
+
+ /* Bad */
+ return -EINVAL;
+}
+
+/**
+ * ide_hw_configure - configure a hw_regs_t instance
+ * @dev: PCI device holding interface
+ * @d: IDE port info
+ * @port: port number
+ * @irq: PCI IRQ
+ * @hw: hw_regs_t instance corresponding to this port
+ *
+ * Perform the initial set up for the hardware interface structure. This
+ * is done per interface port rather than per PCI device. There may be
+ * more than one port per device.
+ *
+ * Returns zero on success or an error code.
+ */
+
+static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
+ unsigned int port, int irq, hw_regs_t *hw)
+{
+ unsigned long ctl = 0, base = 0;
+
+ if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
+ if (ide_pci_check_iomem(dev, d, 2 * port) ||
+ ide_pci_check_iomem(dev, d, 2 * port + 1)) {
+ printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
+ "reported as MEM for port %d!\n",
+ d->name, pci_name(dev), port);
+ return -EINVAL;
+ }
+
+ ctl = pci_resource_start(dev, 2*port+1);
+ base = pci_resource_start(dev, 2*port);
+ } else {
+ /* Use default values */
+ ctl = port ? 0x374 : 0x3f4;
+ base = port ? 0x170 : 0x1f0;
+ }
+
+ if (!base || !ctl) {
+ printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
+ d->name, pci_name(dev), port);
+ return -EINVAL;
+ }
+
+ memset(hw, 0, sizeof(*hw));
+ hw->irq = irq;
+ hw->dev = &dev->dev;
+ hw->chipset = d->chipset ? d->chipset : ide_pci;
+ ide_std_init_ports(hw, base, ctl | 2);
+
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+/**
+ * ide_hwif_setup_dma - configure DMA interface
+ * @hwif: IDE interface
+ * @d: IDE port info
+ *
+ * Set up the DMA base for the interface. Enable the master bits as
+ * necessary and attempt to bring the device DMA into a ready to use
+ * state
+ */
+
+int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+
+ if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
+ ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
+ (dev->class & 0x80))) {
+ unsigned long base = ide_pci_dma_base(hwif, d);
+
+ if (base == 0)
+ return -1;
+
+ hwif->dma_base = base;
+
+ if (hwif->dma_ops == NULL)
+ hwif->dma_ops = &sff_dma_ops;
+
+ if (ide_pci_check_simplex(hwif, d) < 0)
+ return -1;
+
+ if (ide_pci_set_master(dev, d->name) < 0)
+ return -1;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
+ else
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+
+ hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+
+/**
+ * ide_setup_pci_controller - set up IDE PCI
+ * @dev: PCI device
+ * @d: IDE port info
+ * @noisy: verbose flag
+ *
+ * Set up the PCI and controller side of the IDE interface. This brings
+ * up the PCI side of the device, checks that the device is enabled
+ * and enables it if need be
+ */
+
+static int ide_setup_pci_controller(struct pci_dev *dev,
+ const struct ide_port_info *d, int noisy)
+{
+ int ret;
+ u16 pcicmd;
+
+ if (noisy)
+ ide_setup_pci_noise(dev, d);
+
+ ret = ide_pci_enable(dev, d);
+ if (ret < 0)
+ goto out;
+
+ ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+ if (ret < 0) {
+ printk(KERN_ERR "%s %s: error accessing PCI regs\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+ if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
+ ret = ide_pci_configure(dev, d);
+ if (ret < 0)
+ goto out;
+ printk(KERN_INFO "%s %s: device enabled (Linux)\n",
+ d->name, pci_name(dev));
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * ide_pci_setup_ports - configure ports/devices on PCI IDE
+ * @dev: PCI device
+ * @d: IDE port info
+ * @pciirq: IRQ line
+ * @hw: hw_regs_t instances corresponding to this PCI IDE device
+ * @hws: hw_regs_t pointers table to update
+ *
+ * Scan the interfaces attached to this device and do any
+ * necessary per port setup. Attach the devices and ask the
+ * generic DMA layer to do its work for us.
+ *
+ * Normally called automatically from do_ide_setup_pci_device(),
+ * but is also used directly as a helper function by some controllers
+ * where the chipset setup is not the default PCI IDE one.
+ */
+
+void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
+ int pciirq, hw_regs_t *hw, hw_regs_t **hws)
+{
+ int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
+ u8 tmp;
+
+ /*
+ * Set up the IDE ports
+ */
+
+ for (port = 0; port < channels; ++port) {
+ const struct ide_pci_enablebit *e = &d->enablebits[port];
+
+ if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
+ (tmp & e->mask) != e->val)) {
+ printk(KERN_INFO "%s %s: IDE port disabled\n",
+ d->name, pci_name(dev));
+ continue; /* port not enabled */
+ }
+
+ if (ide_hw_configure(dev, d, port, pciirq, hw + port))
+ continue;
+
+ *(hws + port) = hw + port;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
+
+/*
+ * ide_setup_pci_device() looks at the primary/secondary interfaces
+ * on a PCI IDE device and, if they are enabled, prepares the IDE driver
+ * for use with them. This generic code works for most PCI chipsets.
+ *
+ * One thing that is not standardized is the location of the
+ * primary/secondary interface "enable/disable" bits. For chipsets that
+ * we "know" about, this information is in the struct ide_port_info;
+ * for all other chipsets, we just assume both interfaces are enabled.
+ */
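+/*
+ * Illustrative sketch only (not part of this file): for chipsets whose
+ * enable/disable bits are known, struct ide_port_info carries them as
+ * { reg, mask, val } triples, one per port; a port counts as enabled
+ * iff (PCI config byte at reg & mask) == val. A PIIX-style layout, for
+ * example, would look roughly like this:
+ */
+#if 0
+static const struct ide_port_info example_port_info = {
+ .name = "example",
+ .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
+};
+#endif
+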
+static int do_ide_setup_pci_device(struct pci_dev *dev,
+ const struct ide_port_info *d,
+ u8 noisy)
+{
+ int pciirq, ret;
+
+ /*
+ * Can we trust the reported IRQ?
+ */
+ pciirq = dev->irq;
+
+ /*
+ * This allows offboard ide-pci cards to enable a BIOS,
+ * verify interrupt settings of split-mirror pci-config
+ * space, place the chipset into init mode, and/or preserve
+ * an interrupt if the card does not support native IDE.
+ */
+ ret = d->init_chipset ? d->init_chipset(dev) : 0;
+ if (ret < 0)
+ goto out;
+
+ if (ide_pci_is_in_compatibility_mode(dev)) {
+ if (noisy)
+ printk(KERN_INFO "%s %s: not 100%% native mode: will "
+ "probe irqs later\n", d->name, pci_name(dev));
+ pciirq = ret;
+ } else if (!pciirq && noisy) {
+ printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
+ d->name, pci_name(dev), pciirq);
+ } else if (noisy) {
+ printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
+ d->name, pci_name(dev), pciirq);
+ }
+
+ ret = pciirq;
+out:
+ return ret;
+}
+
+int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
+ void *priv)
+{
+ struct ide_host *host;
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ int ret;
+
+ ret = ide_setup_pci_controller(dev, d, 1);
+ if (ret < 0)
+ goto out;
+
+ ide_pci_setup_ports(dev, d, 0, &hw[0], &hws[0]);
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->dev[0] = &dev->dev;
+
+ host->host_priv = priv;
+
+ pci_set_drvdata(dev, host);
+
+ ret = do_ide_setup_pci_device(dev, d, 1);
+ if (ret < 0)
+ goto out;
+
+ /* fixup IRQ */
+ hw[1].irq = hw[0].irq = ret;
+
+ ret = ide_host_register(host, d, hws);
+ if (ret)
+ ide_host_free(host);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_pci_init_one);
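+
+/*
+ * Usage sketch (illustrative only): a host driver's PCI probe hook
+ * normally just forwards to ide_pci_init_one() with its own port info.
+ * The names below are placeholders, not definitions from this file.
+ */
+#if 0
+static int example_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return ide_pci_init_one(dev, &example_port_info, NULL);
+}
+#endif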
+
+int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
+ const struct ide_port_info *d, void *priv)
+{
+ struct pci_dev *pdev[] = { dev1, dev2 };
+ struct ide_host *host;
+ int ret, i;
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+
+ for (i = 0; i < 2; i++) {
+ ret = ide_setup_pci_controller(pdev[i], d, !i);
+ if (ret < 0)
+ goto out;
+
+ ide_pci_setup_ports(pdev[i], d, 0, &hw[i*2], &hws[i*2]);
+ }
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->dev[0] = &dev1->dev;
+ host->dev[1] = &dev2->dev;
+
+ host->host_priv = priv;
+
+ pci_set_drvdata(pdev[0], host);
+ pci_set_drvdata(pdev[1], host);
+
+ for (i = 0; i < 2; i++) {
+ ret = do_ide_setup_pci_device(pdev[i], d, !i);
+
+ /*
+ * FIXME: Mom, mom, they stole me the helper function to undo
+ * do_ide_setup_pci_device() on the first device!
+ */
+ if (ret < 0)
+ goto out;
+
+ /* fixup IRQ */
+ hw[i*2 + 1].irq = hw[i*2].irq = ret;
+ }
+
+ ret = ide_host_register(host, d, hws);
+ if (ret)
+ ide_host_free(host);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_pci_init_two);
+
+void ide_pci_remove(struct pci_dev *dev)
+{
+ struct ide_host *host = pci_get_drvdata(dev);
+ struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
+ int bars;
+
+ if (host->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (host->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
+ ide_host_remove(host);
+
+ if (dev2)
+ pci_release_selected_regions(dev2, bars);
+ pci_release_selected_regions(dev, bars);
+
+ if (dev2)
+ pci_disable_device(dev2);
+ pci_disable_device(dev);
+}
+EXPORT_SYMBOL_GPL(ide_pci_remove);
+
+#ifdef CONFIG_PM
+int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_suspend);
+
+int ide_pci_resume(struct pci_dev *dev)
+{
+ struct ide_host *host = pci_get_drvdata(dev);
+ int rc;
+
+ pci_set_power_state(dev, PCI_D0);
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_restore_state(dev);
+ pci_set_master(dev);
+
+ if (host->init_chipset)
+ host->init_chipset(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_resume);
+#endif
diff --git a/windhoek/include/Makefile b/windhoek/include/Makefile
new file mode 100644
index 00000000..8d31023f
--- /dev/null
+++ b/windhoek/include/Makefile
@@ -0,0 +1,9 @@
+PKGDIR ?= ..
+L4DIR ?= $(PKGDIR)/../..
+
+# All header files found in this directory tree will be automatically
+# installed in a way that they can be included with
+# #include <l4/pkgname/yourfile.h> later.
+# No need to list them in this Makefile.
+
+include $(L4DIR)/mk/include.mk
diff --git a/windhoek/include/types.h b/windhoek/include/types.h
new file mode 100644
index 00000000..c8aaeb00
--- /dev/null
+++ b/windhoek/include/types.h
@@ -0,0 +1,135 @@
+/**
+ * \file windhoek/include/types.h
+ * \brief Windhoek data types
+ *
+ * \date 2008-02-15
+ * \author Bjoern Doebel <doebel@tudos.org>
+ *
+ * (c) 2008 Technische Universitaet Dresden
+ * This file is part of DROPS, which is distributed under the terms of the
+ * GNU General Public License 2. Please see the COPYING file for details.
+ */
+
+#ifndef WINDHOEK_DATA_TYPES_H
+#define WINDHOEK_DATA_TYPES_H
+
+#include <l4/dm_generic/types.h>
+#include <l4/log/l4log.h>
+#include <l4/util/macros.h>
+
+#define MAX_NAME_LEN 16
+
+/** Windhoek BIO types */
+enum
+{
+ BIO_READ,
+ BIO_WRITE
+};
+
+/*
+ * Windhoek manages connections using dataspaces. Every connection is
+ * represented by a server-side handle that the client needs to use whenever
+ * it calls Windhoek. Using this handle, the server manages client state
+ * containing exactly one control dataspace (CTRL) and an arbitrary number of
+ * data dataspaces (DATA).
+ *
+ * CTRL layout:
+ *
+ * +----------------------------------------------------------+
+ * | HEAD |
+ * | * device description (struct windhoek_device) |
+ * +----------------------------------------------------------+
+ * | REQUEST DESCRIPTORS |
+ * | * array of request descriptors |
+ * | (struct windhoek_bio) |
+ * | |
+ * | ... |
+ * +----------------------------------------------------------+
+ *
+ *
+ *
+ * DATA layout:
+ *
+ * DATA spaces contain arbitrary block data and are managed by
+ * the client. For requests, the client allocates an appropriate region inside
+ * a DATA space and hands over this dataspace's ID and an offset along with
+ * the request struct to the server.
+ */
+
+
+/*
+ * Handle to a block device. Retrieved during open() call.
+ */
+typedef l4_int32_t blockdev_handle_t;
+
+
+/** Information about a Windhoek block device
+ */
+typedef struct
+{
+ unsigned int start_sector; /* First sector on disk belonging
+ to this dev */
+ unsigned int block_size; /* sector size */
+ unsigned int cylinders;
+ unsigned int heads;
+ unsigned int sectors; /* Number of sectors per cylinder */
+ unsigned int capacity; /* total no of sectors */
+ unsigned long flags; /* flags */
+ char name[MAX_NAME_LEN]; /* device name */
+ blockdev_handle_t handle; /* device handle */
+} windhoek_device;
+
+
+/** Dump block device info
+ */
+L4_INLINE void dump_windhoek_device(windhoek_device *dev);
+L4_INLINE void dump_windhoek_device(windhoek_device *dev)
+{
+ LOG("Windhoek device '%s', @ %p", dev->name, dev);
+ LOG("\tsectors %d, startsect %x", dev->sectors, dev->start_sector);
+ LOG("\tblock size %d, heads %d, cyl %d", dev->block_size,
+ dev->heads, dev->cylinders);
+ LOG("\tflags %lx, handle %x", dev->flags, dev->handle);
+}
+
+
+/** Just like BDDF, a BIO consists of one or more chunks inside a dataspace
+ */
+typedef struct
+{
+ l4dm_dataspace_t ds; /**< dataspace ID */
+ unsigned int offset; /**< offset in DS */
+ unsigned int size; /**< chunk size */
+} ds_list_element;
+
+
+L4_INLINE void dump_ds_element(ds_list_element *d);
+L4_INLINE void dump_ds_element(ds_list_element *d)
+{
+ LOG("ds id %04x, offset %x, size %x", d->ds.id, d->offset, d->size);
+}
+
+
+/** BIO struct used for transmitting requests
+ */
+typedef struct
+{
+ unsigned int uid; /**< unique identifier - managed by clients */
+ unsigned int type; /**< bio type */
+ blockdev_handle_t handle; /**< handle to block device */
+ unsigned int start_sector; /**< start sector of request */
+ unsigned short sg_count; /**< number of SG elements */
+ /* XXX: Must be last element! */
+ ds_list_element sg_data[0]; /**< SG data */
+} windhoek_bio;
+
+
+L4_INLINE void dump_windhoek_bio(windhoek_bio *bio);
+L4_INLINE void dump_windhoek_bio(windhoek_bio *bio)
+{
+ LOG("bio type %s", bio->type == BIO_READ ? "read" : "write");
+ LOG("handle %x, start sect %x, sg_count %x", bio->handle,
+ bio->start_sector, bio->sg_count);
+}
+
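+/** Illustrative helper (a sketch, not used by the server): how a client
+ * might fill one request descriptor in its CTRL dataspace. The data
+ * dataspace, offset and sector values are placeholders chosen by the
+ * client.
+ */
+L4_INLINE void windhoek_bio_example_fill(windhoek_bio *bio,
+ blockdev_handle_t handle,
+ l4dm_dataspace_t data_ds);
+L4_INLINE void windhoek_bio_example_fill(windhoek_bio *bio,
+ blockdev_handle_t handle,
+ l4dm_dataspace_t data_ds)
+{
+ bio->uid = 1; /* client-managed identifier */
+ bio->type = BIO_READ; /* read request */
+ bio->handle = handle; /* handle returned by open() */
+ bio->start_sector = 0; /* first sector to read */
+ bio->sg_count = 1; /* one SG chunk follows */
+
+ bio->sg_data[0].ds = data_ds; /* DATA space holding the buffer */
+ bio->sg_data[0].offset = 0; /* offset inside the DATA space */
+ bio->sg_data[0].size = 512; /* one 512-byte sector */
+}
+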
+#endif
diff --git a/windhoek/main.c b/windhoek/main.c
new file mode 100644
index 00000000..58786479
--- /dev/null
+++ b/windhoek/main.c
@@ -0,0 +1,88 @@
+/**
+ * \file windhoek/main.c
+ * \brief Windhoek main function
+ *
+ * \date 2008-01-29
+ * \author Bjoern Doebel <doebel@tudos.org>
+ *
+ * (c) 2008 Technische Universitaet Dresden
+ * This file is part of DROPS, which is distributed under the terms of the
+ * GNU General Public License 2. Please see the COPYING file for details.
+ */
+#include <asm/current.h>
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/bio.h>
+#include <linux/hdreg.h>
+#include <linux/list.h>
+#include <linux/buffer_head.h>
+
+#include <l4/dde/dde.h>
+#include <l4/dde/ddekit/initcall.h>
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/linux26/dde26.h>
+
+#include <l4/util/parse_cmd.h>
+#include <l4/util/util.h>
+#include <l4/log/l4log.h>
+#include <l4/names/libnames.h>
+
+#include "windhoek_local.h"
+#include <l4/windhoek/server-internal-server.h>
+
+extern int bdev_cache_init(void);
+extern int ide_generic_init(void);
+extern int ide_cdrom_init(void);
+extern int genhd_device_init(void);
+
+l4_threadid_t main_thread = L4_INVALID_ID;
+
+int main(int argc, const char **argv)
+{
+ int err;
+
+ l4dde26_init();
+ l4dde26_process_init();
+ l4dde26_init_timers();
+ l4dde26_softirq_init();
+ printk("DDE base system initialized.\n");
+ err = bdev_cache_init();
+ printk("Initialized blockdev caches. (%x)\n", err);
+
+ gendisk_init();
+ client_state_init();
+ l4dde26_do_initcalls();
+
+ /* no generic driver, we use a dedicated one
+ * XXX: make this a fallback if no other driver is found */
+#if 0
+ err = ide_generic_init();
+ printk("Initialized generic IDE driver. (%x)\n", err);
+#endif
+
+
+ err = names_register("windhoek");
+ printk("registered at names. \n", err);
+
+ main_thread = l4_myself();
+
+ printk("+----------------------------------------+\n");
+ printk("| Windhoek block server |\n");
+ printk("| ready to rumble.... |\n");
+ printk("+----------------------------------------+\n");
+
+ windhoek_server_server_loop(NULL);
+
+ l4_sleep_forever();
+
+ return 0;
+}
diff --git a/windhoek/partitions/Kconfig b/windhoek/partitions/Kconfig
new file mode 100644
index 00000000..cb5f0a3f
--- /dev/null
+++ b/windhoek/partitions/Kconfig
@@ -0,0 +1,251 @@
+#
+# Partition configuration
+#
+config PARTITION_ADVANCED
+ bool "Advanced partition selection"
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under an operating system running on a different
+ architecture than your Linux system.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about foreign partitioning schemes.
+
+ If unsure, say N.
+
+config ACORN_PARTITION
+ bool "Acorn partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ help
+ Support hard disks partitioned under Acorn operating systems.
+
+config ACORN_PARTITION_CUMANA
+ bool "Cumana partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using the Cumana interface on Acorn machines.
+
+config ACORN_PARTITION_EESOX
+ bool "EESOX partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+
+config ACORN_PARTITION_ICS
+ bool "ICS partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using the ICS interface on Acorn machines.
+
+config ACORN_PARTITION_ADFS
+ bool "Native filecore partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ The Acorn Disc Filing System is the standard file system of the
+ RiscOS operating system which runs on Acorn's ARM-based Risc PC
+ systems and the Acorn Archimedes range of machines. If you say
+ `Y' here, Linux will support disk partitions created under ADFS.
+
+config ACORN_PARTITION_POWERTEC
+ bool "PowerTec partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Support reading partition tables created on Acorn machines using
+ the PowerTec SCSI drive.
+
+config ACORN_PARTITION_RISCIX
+ bool "RISCiX partition support" if PARTITION_ADVANCED
+ default y if ARCH_ACORN
+ depends on ACORN_PARTITION
+ help
+ Once upon a time, there was a native Unix port for the Acorn series
+ of machines called RISCiX. If you say 'Y' here, Linux will be able
+ to read disks partitioned under RISCiX.
+
+config OSF_PARTITION
+ bool "Alpha OSF partition support" if PARTITION_ADVANCED
+ default y if ALPHA
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned on an Alpha machine.
+
+config AMIGA_PARTITION
+ bool "Amiga partition table support" if PARTITION_ADVANCED
+ default y if (AMIGA || AFFS_FS=y)
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under AmigaOS.
+
+config ATARI_PARTITION
+ bool "Atari partition table support" if PARTITION_ADVANCED
+ default y if ATARI
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned under the Atari OS.
+
+config IBM_PARTITION
+ bool "IBM disk label and partition support"
+ depends on PARTITION_ADVANCED && S390
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by IBM DASD disks operating under CMS.
+ Otherwise, say N.
+
+config MAC_PARTITION
+ bool "Macintosh partition map support" if PARTITION_ADVANCED
+ default y if (MAC || PPC_PMAC)
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned on a Macintosh.
+
+config MSDOS_PARTITION
+ bool "PC BIOS (MSDOS partition tables) support" if PARTITION_ADVANCED
+ default y
+ help
+ Say Y here.
+
+config BSD_DISKLABEL
+ bool "BSD disklabel (FreeBSD partition tables) support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ FreeBSD uses its own hard disk partition scheme on your PC. It
+ requires only one entry in the primary partition table of your disk
+ and manages it similarly to DOS extended partitions, putting in its
+ first sector a new partition table in BSD disklabel format. Saying Y
+ here allows you to read these disklabels and further mount FreeBSD
+ partitions from within Linux if you have also said Y to "UFS
+ file system support", above. If you don't know what all this is
+ about, say N.
+
+config MINIX_SUBPARTITION
+ bool "Minix subpartition support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ Minix 2.0.0/2.0.2 subpartition table support for Linux.
+ Say Y here if you want to mount and use Minix 2.0.0/2.0.2
+ subpartitions.
+
+config SOLARIS_X86_PARTITION
+ bool "Solaris (x86) partition table support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ help
+ Like most systems, Solaris x86 uses its own hard disk partition
+ table format, incompatible with all others. Saying Y here allows you
+ to read these partition tables and further mount Solaris x86
+ partitions from within Linux if you have also said Y to "UFS
+ file system support", above.
+
+config UNIXWARE_DISKLABEL
+ bool "Unixware slices support"
+ depends on PARTITION_ADVANCED && MSDOS_PARTITION
+ ---help---
+ Like some systems, UnixWare uses its own slice table inside a
+ partition (VTOC - Virtual Table of Contents). Its format is
+ incompatible with all other OSes. Saying Y here allows you to read
+ VTOC and further mount UnixWare partitions read-only from within
+ Linux if you have also said Y to "UFS file system support" or
+ "System V and Coherent file system support", above.
+
+ This is mainly used to carry data from a UnixWare box to your
+ Linux box via a removable medium like magneto-optical, ZIP or
+ removable IDE drives. Note, however, that a good portable way to
+ transport files and directories between unixes (and even other
+ operating systems) is given by the tar program ("man tar" or
+ preferably "info tar").
+
+ If you don't know what all this is about, say N.
+
+config LDM_PARTITION
+ bool "Windows Logical Disk Manager (Dynamic Disk) support"
+ depends on PARTITION_ADVANCED
+ ---help---
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using Windows 2000's/XP's or Vista's Logical Disk
+ Manager. They are also known as "Dynamic Disks".
+
+ Note this driver only supports Dynamic Disks with a protective MBR
+ label, i.e. a DOS partition table. It does not yet support GPT
+ labelled Dynamic Disks, such as those created with Vista.
+
+ Windows 2000 introduced the concept of Dynamic Disks to get around
+ the limitations of the PC's partitioning scheme. The Logical Disk
+ Manager allows the user to repartition a disk and create spanned,
+ mirrored, striped or RAID volumes, all without the need for
+ rebooting.
+
+ Normal partitions are now called Basic Disks under Windows 2000, XP,
+ and Vista.
+
+ For a fuller description read <file:Documentation/ldm.txt>.
+
+ If unsure, say N.
+
+config LDM_DEBUG
+ bool "Windows LDM extra logging"
+ depends on LDM_PARTITION
+ help
+ Say Y here if you would like LDM to log verbosely. This could be
+ helpful if the driver doesn't work as expected and you'd like to
+ report a bug.
+
+ If unsure, say N.
+
+config SGI_PARTITION
+ bool "SGI partition support" if PARTITION_ADVANCED
+ default y if DEFAULT_SGI_PARTITION
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by SGI machines.
+
+config ULTRIX_PARTITION
+ bool "Ultrix partition table support" if PARTITION_ADVANCED
+ default y if MACH_DECSTATION
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by DEC (now Compaq) Ultrix machines.
+ Otherwise, say N.
+
+config SUN_PARTITION
+ bool "Sun partition tables support" if PARTITION_ADVANCED
+ default y if (SPARC || SUN3 || SUN3X)
+ ---help---
+ Like most systems, SunOS uses its own hard disk partition table
+ format, incompatible with all others. Saying Y here allows you to
+ read these partition tables and further mount SunOS partitions from
+ within Linux if you have also said Y to "UFS file system support",
+ above. This is mainly used to carry data from a SPARC under SunOS to
+ your Linux box via a removable medium like magneto-optical or ZIP
+ drives; note however that a good portable way to transport files and
+ directories between unixes (and even other operating systems) is
+ given by the tar program ("man tar" or preferably "info tar"). If
+ you don't know what all this is about, say N.
+
+config KARMA_PARTITION
+ bool "Karma Partition support"
+ depends on PARTITION_ADVANCED
+ help
+ Say Y here if you would like to mount the Rio Karma MP3 player, as it
+ uses a proprietary partition table.
+
+config EFI_PARTITION
+ bool "EFI GUID Partition support"
+ depends on PARTITION_ADVANCED
+ select CRC32
+ help
+ Say Y here if you would like to use hard disks under Linux which
+ were partitioned using EFI GPT.
+
+config SYSV68_PARTITION
+ bool "SYSV68 partition table support" if PARTITION_ADVANCED
+ default y if VME
+ help
+ Say Y here if you would like to be able to read the hard disk
+ partition table format used by Motorola Delta machines (using
+ sysv68).
+ Otherwise, say N.
diff --git a/windhoek/partitions/Makefile b/windhoek/partitions/Makefile
new file mode 100644
index 00000000..03af8eac
--- /dev/null
+++ b/windhoek/partitions/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-$(CONFIG_BLOCK) := check.o
+
+obj-$(CONFIG_ACORN_PARTITION) += acorn.o
+obj-$(CONFIG_AMIGA_PARTITION) += amiga.o
+obj-$(CONFIG_ATARI_PARTITION) += atari.o
+obj-$(CONFIG_MAC_PARTITION) += mac.o
+obj-$(CONFIG_LDM_PARTITION) += ldm.o
+obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
+obj-$(CONFIG_OSF_PARTITION) += osf.o
+obj-$(CONFIG_SGI_PARTITION) += sgi.o
+obj-$(CONFIG_SUN_PARTITION) += sun.o
+obj-$(CONFIG_ULTRIX_PARTITION) += ultrix.o
+obj-$(CONFIG_IBM_PARTITION) += ibm.o
+obj-$(CONFIG_EFI_PARTITION) += efi.o
+obj-$(CONFIG_KARMA_PARTITION) += karma.o
+obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
diff --git a/windhoek/partitions/acorn.c b/windhoek/partitions/acorn.c
new file mode 100644
index 00000000..a97b477a
--- /dev/null
+++ b/windhoek/partitions/acorn.c
@@ -0,0 +1,555 @@
+/*
+ * linux/fs/partitions/acorn.c
+ *
+ * Copyright (c) 1996-2000 Russell King.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Scan ADFS partitions on hard disk drives. Unfortunately, there
+ * isn't a standard for partitioning drives on Acorn machines, so
+ * every single manufacturer of SCSI and IDE cards created their own
+ * method.
+ */
+#include <linux/buffer_head.h>
+#include <linux/adfs_fs.h>
+
+#include "check.h"
+#include "acorn.h"
+
+/*
+ * Partition types. (Oh for reusability)
+ */
+#define PARTITION_RISCIX_MFM 1
+#define PARTITION_RISCIX_SCSI 2
+#define PARTITION_LINUX 9
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static struct adfs_discrecord *
+adfs_partition(struct parsed_partitions *state, char *name, char *data,
+ unsigned long first_sector, int slot)
+{
+ struct adfs_discrecord *dr;
+ unsigned int nr_sects;
+
+ if (adfs_checkbblk(data))
+ return NULL;
+
+ dr = (struct adfs_discrecord *)(data + 0x1c0);
+
+ if (dr->disc_size == 0 && dr->disc_size_high == 0)
+ return NULL;
+
+ nr_sects = (le32_to_cpu(dr->disc_size_high) << 23) |
+ (le32_to_cpu(dr->disc_size) >> 9);
+
+ if (name)
+ printk(" [%s]", name);
+ put_partition(state, slot, first_sector, nr_sects);
+ return dr;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+
+struct riscix_part {
+ __le32 start;
+ __le32 length;
+ __le32 one;
+ char name[16];
+};
+
+struct riscix_record {
+ __le32 magic;
+#define RISCIX_MAGIC cpu_to_le32(0x4a657320)
+ __le32 date;
+ struct riscix_part part[8];
+};
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static int
+riscix_partition(struct parsed_partitions *state, struct block_device *bdev,
+ unsigned long first_sect, int slot, unsigned long nr_sects)
+{
+ Sector sect;
+ struct riscix_record *rr;
+
+ rr = (struct riscix_record *)read_dev_sector(bdev, first_sect, &sect);
+ if (!rr)
+ return -1;
+
+ printk(" [RISCiX]");
+
+
+ if (rr->magic == RISCIX_MAGIC) {
+ unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+ int part;
+
+ printk(" <");
+
+ put_partition(state, slot++, first_sect, size);
+ for (part = 0; part < 8; part++) {
+ if (rr->part[part].one &&
+ memcmp(rr->part[part].name, "All\0", 4)) {
+ put_partition(state, slot++,
+ le32_to_cpu(rr->part[part].start),
+ le32_to_cpu(rr->part[part].length));
+ printk("(%s)", rr->part[part].name);
+ }
+ }
+
+ printk(" >\n");
+ } else {
+ put_partition(state, slot++, first_sect, nr_sects);
+ }
+
+ put_dev_sector(sect);
+ return slot;
+}
+#endif
+#endif
+
+#define LINUX_NATIVE_MAGIC 0xdeafa1de
+#define LINUX_SWAP_MAGIC 0xdeafab1e
+
+struct linux_part {
+ __le32 magic;
+ __le32 start_sect;
+ __le32 nr_sects;
+};
+
+#if defined(CONFIG_ACORN_PARTITION_CUMANA) || \
+ defined(CONFIG_ACORN_PARTITION_ADFS)
+static int
+linux_partition(struct parsed_partitions *state, struct block_device *bdev,
+ unsigned long first_sect, int slot, unsigned long nr_sects)
+{
+ Sector sect;
+ struct linux_part *linuxp;
+ unsigned long size = nr_sects > 2 ? 2 : nr_sects;
+
+ printk(" [Linux]");
+
+ put_partition(state, slot++, first_sect, size);
+
+ linuxp = (struct linux_part *)read_dev_sector(bdev, first_sect, &sect);
+ if (!linuxp)
+ return -1;
+
+ printk(" <");
+ while (linuxp->magic == cpu_to_le32(LINUX_NATIVE_MAGIC) ||
+ linuxp->magic == cpu_to_le32(LINUX_SWAP_MAGIC)) {
+ if (slot == state->limit)
+ break;
+ put_partition(state, slot++, first_sect +
+ le32_to_cpu(linuxp->start_sect),
+ le32_to_cpu(linuxp->nr_sects));
+ linuxp ++;
+ }
+ printk(" >");
+
+ put_dev_sector(sect);
+ return slot;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+int
+adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev)
+{
+ unsigned long first_sector = 0;
+ unsigned int start_blk = 0;
+ Sector sect;
+ unsigned char *data;
+ char *name = "CUMANA/ADFS";
+ int first = 1;
+ int slot = 1;
+
+ /*
+ * Try Cumana style partitions - sector 6 contains ADFS boot block
+ * with pointer to next 'drive'.
+ *
+ * There are unknowns in this code - is the 'cylinder number' of the
+ * next partition relative to the start of this one - I'm assuming
+ * it is.
+ *
+ * Also, which ID did Cumana use?
+ *
+ * This is totally unfinished, and will require more work to get it
+ * going. Hence it is totally untested.
+ */
+ do {
+ struct adfs_discrecord *dr;
+ unsigned int nr_sects;
+
+ data = read_dev_sector(bdev, start_blk * 2 + 6, &sect);
+ if (!data)
+ return -1;
+
+ if (slot == state->limit)
+ break;
+
+ dr = adfs_partition(state, name, data, first_sector, slot++);
+ if (!dr)
+ break;
+
+ name = NULL;
+
+ nr_sects = (data[0x1fd] + (data[0x1fe] << 8)) *
+ (dr->heads + (dr->lowsector & 0x40 ? 1 : 0)) *
+ dr->secspertrack;
+
+ if (!nr_sects)
+ break;
+
+ first = 0;
+ first_sector += nr_sects;
+ start_blk += nr_sects >> (BLOCK_SIZE_BITS - 9);
+ nr_sects = 0; /* hmm - should be partition size */
+
+ switch (data[0x1fc] & 15) {
+ case 0: /* No partition / ADFS? */
+ break;
+
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+ case PARTITION_RISCIX_SCSI:
+ /* RISCiX - we don't know how to find the next one. */
+ slot = riscix_partition(state, bdev, first_sector,
+ slot, nr_sects);
+ break;
+#endif
+
+ case PARTITION_LINUX:
+ slot = linux_partition(state, bdev, first_sector,
+ slot, nr_sects);
+ break;
+ }
+ put_dev_sector(sect);
+ if (slot == -1)
+ return -1;
+ } while (1);
+ put_dev_sector(sect);
+ return first ? 0 : 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+/*
+ * Purpose: allocate ADFS partitions.
+ *
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ *
+ * Returns: -1 on error, 0 for no ADFS boot sector, 1 for ok.
+ *
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition on first drive.
+ * hda2 = non-ADFS partition.
+ */
+int
+adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev)
+{
+ unsigned long start_sect, nr_sects, sectscyl, heads;
+ Sector sect;
+ unsigned char *data;
+ struct adfs_discrecord *dr;
+ unsigned char id;
+ int slot = 1;
+
+ data = read_dev_sector(bdev, 6, &sect);
+ if (!data)
+ return -1;
+
+ dr = adfs_partition(state, "ADFS", data, 0, slot++);
+ if (!dr) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ heads = dr->heads + ((dr->lowsector >> 6) & 1);
+ sectscyl = dr->secspertrack * heads;
+ start_sect = ((data[0x1fe] << 8) + data[0x1fd]) * sectscyl;
+ id = data[0x1fc] & 15;
+ put_dev_sector(sect);
+
+ /*
+ * Work out start of non-adfs partition.
+ */
+ nr_sects = (bdev->bd_inode->i_size >> 9) - start_sect;
+
+ if (start_sect) {
+ switch (id) {
+#ifdef CONFIG_ACORN_PARTITION_RISCIX
+ case PARTITION_RISCIX_SCSI:
+ case PARTITION_RISCIX_MFM:
+ slot = riscix_partition(state, bdev, start_sect,
+ slot, nr_sects);
+ break;
+#endif
+
+ case PARTITION_LINUX:
+ slot = linux_partition(state, bdev, start_sect,
+ slot, nr_sects);
+ break;
+ }
+ }
+ printk("\n");
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_ICS
+
+struct ics_part {
+ __le32 start;
+ __le32 size;
+};
+
+static int adfspart_check_ICSLinux(struct block_device *bdev, unsigned long block)
+{
+ Sector sect;
+ unsigned char *data = read_dev_sector(bdev, block, &sect);
+ int result = 0;
+
+ if (data) {
+ if (memcmp(data, "LinuxPart", 9) == 0)
+ result = 1;
+ put_dev_sector(sect);
+ }
+
+ return result;
+}
+
+/*
+ * Check for a valid ICS partition using the checksum.
+ */
+static inline int valid_ics_sector(const unsigned char *data)
+{
+ unsigned long sum;
+ int i;
+
+ for (i = 0, sum = 0x50617274; i < 508; i++)
+ sum += data[i];
+
+ sum -= le32_to_cpu(*(__le32 *)(&data[508]));
+
+ return sum == 0;
+}
+
+/*
+ * Purpose: allocate ICS partitions.
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok.
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition 0 on first drive.
+ * hda2 = ADFS partition 1 on first drive.
+ * ..etc..
+ */
+int
+adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev)
+{
+ const unsigned char *data;
+ const struct ics_part *p;
+ int slot;
+ Sector sect;
+
+ /*
+ * Try ICS style partitions - sector 0 contains partition info.
+ */
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+
+ if (!valid_ics_sector(data)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ printk(" [ICS]");
+
+ for (slot = 1, p = (const struct ics_part *)data; p->size; p++) {
+ u32 start = le32_to_cpu(p->start);
+ s32 size = le32_to_cpu(p->size); /* yes, it's signed. */
+
+ if (slot == state->limit)
+ break;
+
+ /*
+ * Negative sizes tell the RISC OS ICS driver to ignore
+ * this partition - in effect it says that this does not
+ * contain an ADFS filesystem.
+ */
+ if (size < 0) {
+ size = -size;
+
+ /*
+ * Our own extension - We use the first sector
+ * of the partition to identify what type this
+ * partition is. We must not make this visible
+ * to the filesystem.
+ */
+ if (size > 1 && adfspart_check_ICSLinux(bdev, start)) {
+ start += 1;
+ size -= 1;
+ }
+ }
+
+ if (size)
+ put_partition(state, slot++, start, size);
+ }
+
+ put_dev_sector(sect);
+ printk("\n");
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+struct ptec_part {
+ __le32 unused1;
+ __le32 unused2;
+ __le32 start;
+ __le32 size;
+ __le32 unused5;
+ char type[8];
+};
+
+static inline int valid_ptec_sector(const unsigned char *data)
+{
+ unsigned char checksum = 0x2a;
+ int i;
+
+ /*
+ * If it looks like a PC/BIOS partition, then it
+ * probably isn't PowerTec.
+ */
+ if (data[510] == 0x55 && data[511] == 0xaa)
+ return 0;
+
+ for (i = 0; i < 511; i++)
+ checksum += data[i];
+
+ return checksum == data[511];
+}
+
+/*
+ * Purpose: allocate ICS partitions.
+ * Params : hd - pointer to gendisk structure to store partition info.
+ * dev - device number to access.
+ * Returns: -1 on error, 0 for no ICS table, 1 for partitions ok.
+ * Alloc : hda = whole drive
+ * hda1 = ADFS partition 0 on first drive.
+ * hda2 = ADFS partition 1 on first drive.
+ * ..etc..
+ */
+int
+adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev)
+{
+ Sector sect;
+ const unsigned char *data;
+ const struct ptec_part *p;
+ int slot = 1;
+ int i;
+
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+
+ if (!valid_ptec_sector(data)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ printk(" [POWERTEC]");
+
+ for (i = 0, p = (const struct ptec_part *)data; i < 12; i++, p++) {
+ u32 start = le32_to_cpu(p->start);
+ u32 size = le32_to_cpu(p->size);
+
+ if (size)
+ put_partition(state, slot++, start, size);
+ }
+
+ put_dev_sector(sect);
+ printk("\n");
+ return 1;
+}
+#endif
+
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+struct eesox_part {
+ char magic[6];
+ char name[10];
+ __le32 start;
+ __le32 unused6;
+ __le32 unused7;
+ __le32 unused8;
+};
+
+/*
+ * Guess who created this format?
+ */
+static const char eesox_name[] = {
+ 'N', 'e', 'i', 'l', ' ',
+ 'C', 'r', 'i', 't', 'c', 'h', 'e', 'l', 'l', ' ', ' '
+};
+
+/*
+ * EESOX SCSI partition format.
+ *
+ * This is a goddamned awful partition format. We don't seem to store
+ * the size of the partition in this table, only the start addresses.
+ *
+ * There are two possibilities where the size comes from:
+ * 1. The individual ADFS boot block entries that are placed on the disk.
+ * 2. The start address of the next entry.
+ */
+int
+adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev)
+{
+ Sector sect;
+ const unsigned char *data;
+ unsigned char buffer[256];
+ struct eesox_part *p;
+ sector_t start = 0;
+ int i, slot = 1;
+
+ data = read_dev_sector(bdev, 7, &sect);
+ if (!data)
+ return -1;
+
+ /*
+ * "Decrypt" the partition table. God knows why...
+ */
+ for (i = 0; i < 256; i++)
+ buffer[i] = data[i] ^ eesox_name[i & 15];
+
+ put_dev_sector(sect);
+
+ for (i = 0, p = (struct eesox_part *)buffer; i < 8; i++, p++) {
+ sector_t next;
+
+ if (memcmp(p->magic, "Eesox", 6))
+ break;
+
+ next = le32_to_cpu(p->start);
+ if (i)
+ put_partition(state, slot++, start, next - start);
+ start = next;
+ }
+
+ if (i != 0) {
+ sector_t size;
+
+ size = get_capacity(bdev->bd_disk);
+ put_partition(state, slot++, start, size - start);
+ printk("\n");
+ }
+
+ return i ? 1 : 0;
+}
+#endif
diff --git a/windhoek/partitions/acorn.h b/windhoek/partitions/acorn.h
new file mode 100644
index 00000000..81fd50ec
--- /dev/null
+++ b/windhoek/partitions/acorn.h
@@ -0,0 +1,14 @@
+/*
+ * linux/fs/partitions/acorn.h
+ *
+ * Copyright (C) 1996-2001 Russell King.
+ *
+ * I _hate_ this partitioning mess - why can't we have one defined
+ * format, and everyone stick to it?
+ */
+
+int adfspart_check_CUMANA(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_ADFS(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_ICS(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_POWERTEC(struct parsed_partitions *state, struct block_device *bdev);
+int adfspart_check_EESOX(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/amiga.c b/windhoek/partitions/amiga.c
new file mode 100644
index 00000000..9917a8c3
--- /dev/null
+++ b/windhoek/partitions/amiga.c
@@ -0,0 +1,130 @@
+/*
+ * fs/partitions/amiga.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/types.h>
+#include <linux/affs_hardblocks.h>
+
+#include "check.h"
+#include "amiga.h"
+
+static __inline__ u32
+checksum_block(__be32 *m, int size)
+{
+ u32 sum = 0;
+
+ while (size--)
+ sum += be32_to_cpu(*m++);
+ return sum;
+}
+
+int
+amiga_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ Sector sect;
+ unsigned char *data;
+ struct RigidDiskBlock *rdb;
+ struct PartitionBlock *pb;
+ int start_sect, nr_sects, blk, part, res = 0;
+ int blksize = 1; /* Multiplier for disk block size */
+ int slot = 1;
+ char b[BDEVNAME_SIZE];
+
+ for (blk = 0; ; blk++, put_dev_sector(sect)) {
+ if (blk == RDB_ALLOCATION_LIMIT)
+ goto rdb_done;
+ data = read_dev_sector(bdev, blk, &sect);
+ if (!data) {
+ if (warn_no_part)
+ printk("Dev %s: unable to read RDB block %d\n",
+ bdevname(bdev, b), blk);
+ res = -1;
+ goto rdb_done;
+ }
+ if (*(__be32 *)data != cpu_to_be32(IDNAME_RIGIDDISK))
+ continue;
+
+ rdb = (struct RigidDiskBlock *)data;
+ if (checksum_block((__be32 *)data, be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F) == 0)
+ break;
+ /* Try again with 0xdc..0xdf zeroed, Windows might have
+ * trashed it.
+ */
+ *(__be32 *)(data+0xdc) = 0;
+ if (checksum_block((__be32 *)data,
+ be32_to_cpu(rdb->rdb_SummedLongs) & 0x7F)==0) {
+ printk("Warning: Trashed word at 0xd0 in block %d "
+ "ignored in checksum calculation\n",blk);
+ break;
+ }
+
+ printk("Dev %s: RDB in block %d has bad checksum\n",
+ bdevname(bdev, b), blk);
+ }
+
+ /* blksize is blocks per 512 byte standard block */
+ blksize = be32_to_cpu( rdb->rdb_BlockBytes ) / 512;
+
+ printk(" RDSK (%d)", blksize * 512); /* Be more informative */
+ blk = be32_to_cpu(rdb->rdb_PartitionList);
+ put_dev_sector(sect);
+ for (part = 1; blk>0 && part<=16; part++, put_dev_sector(sect)) {
+ blk *= blksize; /* Read in terms partition table understands */
+ data = read_dev_sector(bdev, blk, &sect);
+ if (!data) {
+ if (warn_no_part)
+ printk("Dev %s: unable to read partition block %d\n",
+ bdevname(bdev, b), blk);
+ res = -1;
+ goto rdb_done;
+ }
+ pb = (struct PartitionBlock *)data;
+ blk = be32_to_cpu(pb->pb_Next);
+ if (pb->pb_ID != cpu_to_be32(IDNAME_PARTITION))
+ continue;
+ if (checksum_block((__be32 *)pb, be32_to_cpu(pb->pb_SummedLongs) & 0x7F) != 0 )
+ continue;
+
+ /* Tell Kernel about it */
+
+ nr_sects = (be32_to_cpu(pb->pb_Environment[10]) + 1 -
+ be32_to_cpu(pb->pb_Environment[9])) *
+ be32_to_cpu(pb->pb_Environment[3]) *
+ be32_to_cpu(pb->pb_Environment[5]) *
+ blksize;
+ if (!nr_sects)
+ continue;
+ start_sect = be32_to_cpu(pb->pb_Environment[9]) *
+ be32_to_cpu(pb->pb_Environment[3]) *
+ be32_to_cpu(pb->pb_Environment[5]) *
+ blksize;
+ put_partition(state,slot++,start_sect,nr_sects);
+ {
+ /* Be even more informative to aid mounting */
+ char dostype[4];
+ __be32 *dt = (__be32 *)dostype;
+ *dt = pb->pb_Environment[16];
+ if (dostype[3] < ' ')
+ printk(" (%c%c%c^%c)",
+ dostype[0], dostype[1],
+ dostype[2], dostype[3] + '@' );
+ else
+ printk(" (%c%c%c%c)",
+ dostype[0], dostype[1],
+ dostype[2], dostype[3]);
+ printk("(res %d spb %d)",
+ be32_to_cpu(pb->pb_Environment[6]),
+ be32_to_cpu(pb->pb_Environment[4]));
+ }
+ res = 1;
+ }
+ printk("\n");
+
+rdb_done:
+ return res;
+}
diff --git a/windhoek/partitions/amiga.h b/windhoek/partitions/amiga.h
new file mode 100644
index 00000000..2f3e9ce2
--- /dev/null
+++ b/windhoek/partitions/amiga.h
@@ -0,0 +1,6 @@
+/*
+ * fs/partitions/amiga.h
+ */
+
+int amiga_partition(struct parsed_partitions *state, struct block_device *bdev);
+
diff --git a/windhoek/partitions/atari.c b/windhoek/partitions/atari.c
new file mode 100644
index 00000000..1f3572d5
--- /dev/null
+++ b/windhoek/partitions/atari.c
@@ -0,0 +1,149 @@
+/*
+ * fs/partitions/atari.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/ctype.h>
+#include "check.h"
+#include "atari.h"
+
+/* ++guenther: this should be settable by the user ("make config")?.
+ */
+#define ICD_PARTS
+
+/* check if a partition entry looks valid -- Atari format is assumed if at
+ least one of the primary entries is ok this way */
+#define VALID_PARTITION(pi,hdsiz) \
+ (((pi)->flg & 1) && \
+ isalnum((pi)->id[0]) && isalnum((pi)->id[1]) && isalnum((pi)->id[2]) && \
+ be32_to_cpu((pi)->st) <= (hdsiz) && \
+ be32_to_cpu((pi)->st) + be32_to_cpu((pi)->siz) <= (hdsiz))
+
+static inline int OK_id(char *s)
+{
+ return memcmp (s, "GEM", 3) == 0 || memcmp (s, "BGM", 3) == 0 ||
+ memcmp (s, "LNX", 3) == 0 || memcmp (s, "SWP", 3) == 0 ||
+ memcmp (s, "RAW", 3) == 0 ;
+}
+
+int atari_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ Sector sect;
+ struct rootsector *rs;
+ struct partition_info *pi;
+ u32 extensect;
+ u32 hd_size;
+ int slot;
+#ifdef ICD_PARTS
+ int part_fmt = 0; /* 0:unknown, 1:AHDI, 2:ICD/Supra */
+#endif
+
+ rs = (struct rootsector *) read_dev_sector(bdev, 0, &sect);
+ if (!rs)
+ return -1;
+
+ /* Verify this is an Atari rootsector: */
+ hd_size = bdev->bd_inode->i_size >> 9;
+ if (!VALID_PARTITION(&rs->part[0], hd_size) &&
+ !VALID_PARTITION(&rs->part[1], hd_size) &&
+ !VALID_PARTITION(&rs->part[2], hd_size) &&
+ !VALID_PARTITION(&rs->part[3], hd_size)) {
+ /*
+ * if there's no valid primary partition, assume that there is no
+ * Atari-format partition table (there's no reliable magic or the
+ * like :-()
+ */
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ pi = &rs->part[0];
+ printk (" AHDI");
+ for (slot = 1; pi < &rs->part[4] && slot < state->limit; slot++, pi++) {
+ struct rootsector *xrs;
+ Sector sect2;
+ ulong partsect;
+
+ if ( !(pi->flg & 1) )
+ continue;
+ /* active partition */
+ if (memcmp (pi->id, "XGM", 3) != 0) {
+ /* we don't care about other id's */
+ put_partition (state, slot, be32_to_cpu(pi->st),
+ be32_to_cpu(pi->siz));
+ continue;
+ }
+ /* extension partition */
+#ifdef ICD_PARTS
+ part_fmt = 1;
+#endif
+ printk(" XGM<");
+ partsect = extensect = be32_to_cpu(pi->st);
+ while (1) {
+ xrs = (struct rootsector *)read_dev_sector(bdev, partsect, &sect2);
+ if (!xrs) {
+ printk (" block %ld read failed\n", partsect);
+ put_dev_sector(sect);
+ return -1;
+ }
+
+ /* ++roman: sanity check: bit 0 of flg field must be set */
+ if (!(xrs->part[0].flg & 1)) {
+ printk( "\nFirst sub-partition in extended partition is not valid!\n" );
+ put_dev_sector(sect2);
+ break;
+ }
+
+ put_partition(state, slot,
+ partsect + be32_to_cpu(xrs->part[0].st),
+ be32_to_cpu(xrs->part[0].siz));
+
+ if (!(xrs->part[1].flg & 1)) {
+ /* end of linked partition list */
+ put_dev_sector(sect2);
+ break;
+ }
+ if (memcmp( xrs->part[1].id, "XGM", 3 ) != 0) {
+ printk("\nID of extended partition is not XGM!\n");
+ put_dev_sector(sect2);
+ break;
+ }
+
+ partsect = be32_to_cpu(xrs->part[1].st) + extensect;
+ put_dev_sector(sect2);
+ if (++slot == state->limit) {
+ printk( "\nMaximum number of partitions reached!\n" );
+ break;
+ }
+ }
+ printk(" >");
+ }
+#ifdef ICD_PARTS
+ if ( part_fmt!=1 ) { /* no extended partitions -> test ICD-format */
+ pi = &rs->icdpart[0];
+ /* sanity check: no ICD format if first partition invalid */
+ if (OK_id(pi->id)) {
+ printk(" ICD<");
+ for (; pi < &rs->icdpart[8] && slot < state->limit; slot++, pi++) {
+ /* accept only GEM,BGM,RAW,LNX,SWP partitions */
+ if (!((pi->flg & 1) && OK_id(pi->id)))
+ continue;
+ part_fmt = 2;
+ put_partition (state, slot,
+ be32_to_cpu(pi->st),
+ be32_to_cpu(pi->siz));
+ }
+ printk(" >");
+ }
+ }
+#endif
+ put_dev_sector(sect);
+
+ printk ("\n");
+
+ return 1;
+}
diff --git a/windhoek/partitions/atari.h b/windhoek/partitions/atari.h
new file mode 100644
index 00000000..63186b00
--- /dev/null
+++ b/windhoek/partitions/atari.h
@@ -0,0 +1,34 @@
+/*
+ * fs/partitions/atari.h
+ * Moved by Russell King from:
+ *
+ * linux/include/linux/atari_rootsec.h
+ * definitions for Atari Rootsector layout
+ * by Andreas Schwab (schwab@ls5.informatik.uni-dortmund.de)
+ *
+ * modified for ICD/Supra partitioning scheme restricted to at most 12
+ * partitions
+ * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
+ */
+
+struct partition_info
+{
+ u8 flg; /* bit 0: active; bit 7: bootable */
+ char id[3]; /* "GEM", "BGM", "XGM", or other */
+ __be32 st; /* start of partition */
+ __be32 siz; /* length of partition */
+};
+
+struct rootsector
+{
+ char unused[0x156]; /* room for boot code */
+ struct partition_info icdpart[8]; /* info for ICD-partitions 5..12 */
+ char unused2[0xc];
+ u32 hd_siz; /* size of disk in blocks */
+ struct partition_info part[4];
+ u32 bsl_st; /* start of bad sector list */
+ u32 bsl_cnt; /* length of bad sector list */
+ u16 checksum; /* checksum for bootable disks */
+} __attribute__((__packed__));
+
+int atari_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/check.c b/windhoek/partitions/check.c
new file mode 100644
index 00000000..a8098337
--- /dev/null
+++ b/windhoek/partitions/check.c
@@ -0,0 +1,639 @@
+/*
+ * fs/partitions/check.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ *
+ * We now have independent partition support from the
+ * block drivers, which allows all the partition code to
+ * be grouped in one location, and it to be mostly self
+ * contained.
+ *
+ * Added needed MAJORS for new pairs, {hdi,hdj}, {hdk,hdl}
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kmod.h>
+#include <linux/ctype.h>
+#include <linux/genhd.h>
+
+#include "check.h"
+
+#include "acorn.h"
+#include "amiga.h"
+#include "atari.h"
+#include "ldm.h"
+#include "mac.h"
+#include "msdos.h"
+#include "osf.h"
+#include "sgi.h"
+#include "sun.h"
+#include "ibm.h"
+#include "ultrix.h"
+#include "efi.h"
+#include "karma.h"
+#include "sysv68.h"
+
+#ifdef CONFIG_BLK_DEV_MD
+extern void md_autodetect_dev(dev_t dev);
+#endif
+
+int warn_no_part = 1; /* This is ugly: should make genhd removable-media aware */
+
+static int (*check_part[])(struct parsed_partitions *, struct block_device *) = {
+ /*
+ * Probe partition formats with tables at disk address 0
+ * that also have an ADFS boot block at 0xdc0.
+ */
+#ifdef CONFIG_ACORN_PARTITION_ICS
+ adfspart_check_ICS,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_POWERTEC
+ adfspart_check_POWERTEC,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_EESOX
+ adfspart_check_EESOX,
+#endif
+
+ /*
+ * Now move on to formats that only have partition info at
+ * disk address 0xdc0. Since these may also have stale
+ * PC/BIOS partition tables, they need to come before
+ * the msdos entry.
+ */
+#ifdef CONFIG_ACORN_PARTITION_CUMANA
+ adfspart_check_CUMANA,
+#endif
+#ifdef CONFIG_ACORN_PARTITION_ADFS
+ adfspart_check_ADFS,
+#endif
+
+#ifdef CONFIG_EFI_PARTITION
+ efi_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_SGI_PARTITION
+ sgi_partition,
+#endif
+#ifdef CONFIG_LDM_PARTITION
+ ldm_partition, /* this must come before msdos */
+#endif
+#ifdef CONFIG_MSDOS_PARTITION
+ msdos_partition,
+#endif
+#ifdef CONFIG_OSF_PARTITION
+ osf_partition,
+#endif
+#ifdef CONFIG_SUN_PARTITION
+ sun_partition,
+#endif
+#ifdef CONFIG_AMIGA_PARTITION
+ amiga_partition,
+#endif
+#ifdef CONFIG_ATARI_PARTITION
+ atari_partition,
+#endif
+#ifdef CONFIG_MAC_PARTITION
+ mac_partition,
+#endif
+#ifdef CONFIG_ULTRIX_PARTITION
+ ultrix_partition,
+#endif
+#ifdef CONFIG_IBM_PARTITION
+ ibm_partition,
+#endif
+#ifdef CONFIG_KARMA_PARTITION
+ karma_partition,
+#endif
+#ifdef CONFIG_SYSV68_PARTITION
+ sysv68_partition,
+#endif
+ NULL
+};
+
+/*
+ * disk_name() is used by partition check code and the genhd driver.
+ * It formats the device name of the indicated disk into
+ * the supplied buffer (of size at least 32), and returns
+ * a pointer to that same buffer (for convenience).
+ */
+
+char *disk_name(struct gendisk *hd, int partno, char *buf)
+{
+ if (!partno)
+ snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name);
+ else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1]))
+ snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno);
+ else
+ snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno);
+
+ return buf;
+}
+
+const char *bdevname(struct block_device *bdev, char *buf)
+{
+ return disk_name(bdev->bd_disk, bdev->bd_part->partno, buf);
+}
+
+EXPORT_SYMBOL(bdevname);
+
+/*
+ * There's very little reason to use this, you should really
+ * have a struct block_device just about everywhere and use
+ * bdevname() instead.
+ */
+const char *__bdevname(dev_t dev, char *buffer)
+{
+ scnprintf(buffer, BDEVNAME_SIZE, "unknown-block(%u,%u)",
+ MAJOR(dev), MINOR(dev));
+ return buffer;
+}
+
+EXPORT_SYMBOL(__bdevname);
+
+static struct parsed_partitions *
+check_partition(struct gendisk *hd, struct block_device *bdev)
+{
+ struct parsed_partitions *state;
+ int i, res, err;
+
+ state = kmalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ disk_name(hd, 0, state->name);
+ printk(KERN_INFO " %s:", state->name);
+ if (isdigit(state->name[strlen(state->name)-1]))
+ sprintf(state->name, "p");
+
+ state->limit = disk_max_parts(hd);
+ i = res = err = 0;
+ while (!res && check_part[i]) {
+ memset(&state->parts, 0, sizeof(state->parts));
+ res = check_part[i++](state, bdev);
+ if (res < 0) {
+ /* We have hit an I/O error which we don't report now.
+ * But record it, and let the others do their job.
+ */
+ err = res;
+ res = 0;
+ }
+
+ }
+ if (res > 0)
+ return state;
+ if (err)
+ /* The partition is unrecognized. So report I/O errors if there were any */
+ res = err;
+ if (!res)
+ printk(" unknown partition table\n");
+ else if (warn_no_part)
+ printk(" unable to read partition table\n");
+ kfree(state);
+ return ERR_PTR(res);
+}
+
+static ssize_t part_partition_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%d\n", p->partno);
+}
+
+static ssize_t part_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
+}
+
+ssize_t part_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects);
+}
+
+ssize_t part_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ int cpu;
+
+ cpu = part_stat_lock();
+ part_round_stats(cpu, p);
+ part_stat_unlock();
+ return sprintf(buf,
+ "%8lu %8lu %8llu %8u "
+ "%8lu %8lu %8llu %8u "
+ "%8u %8u %8u"
+ "\n",
+ part_stat_read(p, ios[READ]),
+ part_stat_read(p, merges[READ]),
+ (unsigned long long)part_stat_read(p, sectors[READ]),
+ jiffies_to_msecs(part_stat_read(p, ticks[READ])),
+ part_stat_read(p, ios[WRITE]),
+ part_stat_read(p, merges[WRITE]),
+ (unsigned long long)part_stat_read(p, sectors[WRITE]),
+ jiffies_to_msecs(part_stat_read(p, ticks[WRITE])),
+ p->in_flight,
+ jiffies_to_msecs(part_stat_read(p, io_ticks)),
+ jiffies_to_msecs(part_stat_read(p, time_in_queue)));
+}
+
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ssize_t part_fail_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hd_struct *p = dev_to_part(dev);
+
+ return sprintf(buf, "%d\n", p->make_it_fail);
+}
+
+ssize_t part_fail_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ int i;
+
+ if (count > 0 && sscanf(buf, "%d", &i) > 0)
+ p->make_it_fail = (i == 0) ? 0 : 1;
+
+ return count;
+}
+#endif
+
+static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
+static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
+static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
+static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+static struct device_attribute dev_attr_fail =
+ __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
+#endif
+
+static struct attribute *part_attrs[] = {
+ &dev_attr_partition.attr,
+ &dev_attr_start.attr,
+ &dev_attr_size.attr,
+ &dev_attr_stat.attr,
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+ &dev_attr_fail.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group part_attr_group = {
+ .attrs = part_attrs,
+};
+
+static struct attribute_group *part_attr_groups[] = {
+ &part_attr_group,
+ NULL
+};
+
+static void part_release(struct device *dev)
+{
+ struct hd_struct *p = dev_to_part(dev);
+ free_part_stats(p);
+ kfree(p);
+}
+
+struct device_type part_type = {
+ .name = "partition",
+ .groups = part_attr_groups,
+ .release = part_release,
+};
+
+static void delete_partition_rcu_cb(struct rcu_head *head)
+{
+ struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+
+ part->start_sect = 0;
+ part->nr_sects = 0;
+ part_stat_set_all(part, 0);
+ put_device(part_to_dev(part));
+}
+
+void delete_partition(struct gendisk *disk, int partno)
+{
+ struct disk_part_tbl *ptbl = disk->part_tbl;
+ struct hd_struct *part;
+
+ if (partno >= ptbl->len)
+ return;
+
+ part = ptbl->part[partno];
+ if (!part)
+ return;
+
+ blk_free_devt(part_devt(part));
+ rcu_assign_pointer(ptbl->part[partno], NULL);
+ rcu_assign_pointer(ptbl->last_lookup, NULL);
+ kobject_put(part->holder_dir);
+ device_del(part_to_dev(part));
+
+#ifndef DDE_LINUX
+ call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+#else
+ delete_partition_rcu_cb(&part->rcu_head);
+#endif
+}
+
+static ssize_t whole_disk_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return 0;
+}
+static DEVICE_ATTR(whole_disk, S_IRUSR | S_IRGRP | S_IROTH,
+ whole_disk_show, NULL);
+
+struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ sector_t start, sector_t len, int flags)
+{
+ struct hd_struct *p;
+ dev_t devt = MKDEV(0, 0);
+ struct device *ddev = disk_to_dev(disk);
+ struct device *pdev;
+ struct disk_part_tbl *ptbl;
+ const char *dname;
+ int err;
+
+ err = disk_expand_part_tbl(disk, partno);
+ if (err)
+ return ERR_PTR(err);
+ ptbl = disk->part_tbl;
+
+ if (ptbl->part[partno])
+ return ERR_PTR(-EBUSY);
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return ERR_PTR(-ENOMEM);
+
+ if (!init_part_stats(p)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ pdev = part_to_dev(p);
+
+ p->start_sect = start;
+ p->nr_sects = len;
+ p->partno = partno;
+ p->policy = get_disk_ro(disk);
+
+ dname = dev_name(ddev);
+ if (isdigit(dname[strlen(dname) - 1]))
+ dev_set_name(pdev, "%sp%d", dname, partno);
+ else
+ dev_set_name(pdev, "%s%d", dname, partno);
+
+ device_initialize(pdev);
+ pdev->class = &block_class;
+ pdev->type = &part_type;
+ pdev->parent = ddev;
+
+ err = blk_alloc_devt(p, &devt);
+ if (err)
+ goto out_free_stats;
+ pdev->devt = devt;
+
+ /* delay uevent until 'holders' subdir is created */
+ pdev->uevent_suppress = 1;
+ err = device_add(pdev);
+ if (err)
+ goto out_put;
+
+ err = -ENOMEM;
+ p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
+ if (!p->holder_dir)
+ goto out_del;
+
+ pdev->uevent_suppress = 0;
+ if (flags & ADDPART_FLAG_WHOLEDISK) {
+ err = device_create_file(pdev, &dev_attr_whole_disk);
+ if (err)
+ goto out_del;
+ }
+
+ /* everything is up and running, commence */
+ INIT_RCU_HEAD(&p->rcu_head);
+ rcu_assign_pointer(ptbl->part[partno], p);
+
+	/* suppress uevent if the disk suppresses it */
+ if (!ddev->uevent_suppress)
+ kobject_uevent(&pdev->kobj, KOBJ_ADD);
+
+ return p;
+
+out_free_stats:
+ free_part_stats(p);
+out_free:
+ kfree(p);
+ return ERR_PTR(err);
+out_del:
+ kobject_put(p->holder_dir);
+ device_del(pdev);
+out_put:
+ put_device(pdev);
+ blk_free_devt(devt);
+ return ERR_PTR(err);
+}
+
+/* Not exported, helper to add_disk(). */
+void register_disk(struct gendisk *disk)
+{
+ struct device *ddev = disk_to_dev(disk);
+ struct block_device *bdev;
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ int err;
+
+ ddev->parent = disk->driverfs_dev;
+
+ dev_set_name(ddev, disk->disk_name);
+
+	/* delay uevents until we have scanned the partition table */
+ ddev->uevent_suppress = 1;
+
+ if (device_add(ddev))
+ return;
+#ifndef CONFIG_SYSFS_DEPRECATED
+ err = sysfs_create_link(block_depr, &ddev->kobj,
+ kobject_name(&ddev->kobj));
+ if (err) {
+ device_del(ddev);
+ return;
+ }
+#endif
+ disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
+ disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
+
+ /* No minors to use for partitions */
+ if (!disk_partitionable(disk))
+ goto exit;
+
+ /* No such device (e.g., media were just removed) */
+ if (!get_capacity(disk))
+ goto exit;
+
+ bdev = bdget_disk(disk, 0);
+ if (!bdev)
+ goto exit;
+
+ bdev->bd_invalidated = 1;
+ err = blkdev_get(bdev, FMODE_READ);
+ if (err < 0)
+ goto exit;
+ blkdev_put(bdev, FMODE_READ);
+
+exit:
+ /* announce disk after possible partitions are created */
+ ddev->uevent_suppress = 0;
+ kobject_uevent(&ddev->kobj, KOBJ_ADD);
+
+ /* announce possible partitions */
+ disk_part_iter_init(&piter, disk, 0);
+ while ((part = disk_part_iter_next(&piter)))
+ kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
+ disk_part_iter_exit(&piter);
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+#ifndef DDE_LINUX
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+ struct parsed_partitions *state;
+ int p, highest, res;
+
+ if (bdev->bd_part_count)
+ return -EBUSY;
+ res = invalidate_partition(disk, 0);
+ if (res)
+ return res;
+
+ disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter)))
+ delete_partition(disk, part->partno);
+ disk_part_iter_exit(&piter);
+
+ if (disk->fops->revalidate_disk)
+ disk->fops->revalidate_disk(disk);
+ check_disk_size_change(disk, bdev);
+ bdev->bd_invalidated = 0;
+ if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
+ return 0;
+ if (IS_ERR(state)) /* I/O error reading the partition table */
+ return -EIO;
+
+ /* tell userspace that the media / partition table may have changed */
+ kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+ /* Detect the highest partition number and preallocate
+ * disk->part_tbl. This is an optimization and not strictly
+ * necessary.
+ */
+ for (p = 1, highest = 0; p < state->limit; p++)
+ if (state->parts[p].size)
+ highest = p;
+
+ disk_expand_part_tbl(disk, highest);
+
+ /* add partitions */
+ for (p = 1; p < state->limit; p++) {
+ sector_t size = state->parts[p].size;
+ sector_t from = state->parts[p].from;
+ if (!size)
+ continue;
+ if (from >= get_capacity(disk)) {
+ printk(KERN_WARNING
+ "%s: p%d ignored, start %llu is behind the end of the disk\n",
+ disk->disk_name, p, (unsigned long long) from);
+ continue;
+ }
+ if (from + size > get_capacity(disk)) {
+ /*
+			 * we cannot ignore partitions of broken tables
+			 * created by, for example, camera firmware, but we
+			 * limit them to the end of the disk to avoid
+			 * creating invalid block devices
+ */
+ printk(KERN_WARNING
+ "%s: p%d size %llu limited to end of disk\n",
+ disk->disk_name, p, (unsigned long long) size);
+ size = get_capacity(disk) - from;
+ }
+ part = add_partition(disk, p, from, size,
+ state->parts[p].flags);
+ if (IS_ERR(part)) {
+ printk(KERN_ERR " %s: p%d could not be added: %ld\n",
+ disk->disk_name, p, -PTR_ERR(part));
+ continue;
+ }
+#ifdef CONFIG_BLK_DEV_MD
+ if (state->parts[p].flags & ADDPART_FLAG_RAID)
+ md_autodetect_dev(part_to_dev(part)->devt);
+#endif
+ }
+ kfree(state);
+#endif /* DDE_LINUX */
+ return 0;
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
+ struct address_space *mapping = bdev->bd_inode->i_mapping;
+ struct page *page;
+
+ page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+ NULL);
+ if (!IS_ERR(page)) {
+ if (PageError(page))
+ goto fail;
+ p->v = page;
+ return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+fail:
+ page_cache_release(page);
+ }
+ p->v = NULL;
+ return NULL;
+}
+
+EXPORT_SYMBOL(read_dev_sector);
+
+void del_gendisk(struct gendisk *disk)
+{
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ /* invalidate stuff */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE);
+ while ((part = disk_part_iter_next(&piter))) {
+ invalidate_partition(disk, part->partno);
+ delete_partition(disk, part->partno);
+ }
+ disk_part_iter_exit(&piter);
+
+ invalidate_partition(disk, 0);
+ blk_free_devt(disk_to_dev(disk)->devt);
+ set_capacity(disk, 0);
+ disk->flags &= ~GENHD_FL_UP;
+ unlink_gendisk(disk);
+ part_stat_set_all(&disk->part0, 0);
+ disk->part0.stamp = 0;
+
+ kobject_put(disk->part0.holder_dir);
+ kobject_put(disk->slave_dir);
+ disk->driverfs_dev = NULL;
+#ifndef CONFIG_SYSFS_DEPRECATED
+ sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+#endif
+ device_del(disk_to_dev(disk));
+}
diff --git a/windhoek/partitions/check.h b/windhoek/partitions/check.h
new file mode 100644
index 00000000..98dbe1a8
--- /dev/null
+++ b/windhoek/partitions/check.h
@@ -0,0 +1,30 @@
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+
+/*
+ * add_gd_partition adds a partition's details to the device's partition
+ * description.
+ */
+struct parsed_partitions {
+ char name[BDEVNAME_SIZE];
+ struct {
+ sector_t from;
+ sector_t size;
+ int flags;
+ } parts[DISK_MAX_PARTS];
+ int next;
+ int limit;
+};
+
+static inline void
+put_partition(struct parsed_partitions *p, int n, sector_t from, sector_t size)
+{
+ if (n < p->limit) {
+ p->parts[n].from = from;
+ p->parts[n].size = size;
+ printk(" %s%d", p->name, n);
+ }
+}
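+
+/*
+ * Illustrative sketch (compiled out, not part of the original header):
+ * a minimal parser fills consecutive slots through put_partition().
+ * The function name and sector numbers are made up for illustration.
+ */
+#if 0
+static int example_partition(struct parsed_partitions *state,
+			     struct block_device *bdev)
+{
+	/* slot 0 is the whole disk; real partitions start at slot 1 */
+	put_partition(state, 1, 2048 /* from */, 204800 /* size */);
+	printk("\n");
+	return 1;	/* 1: recognized, 0: not ours, -1: I/O error */
+}
+#endif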
+
+extern int warn_no_part;
+
diff --git a/windhoek/partitions/efi.c b/windhoek/partitions/efi.c
new file mode 100644
index 00000000..038a6022
--- /dev/null
+++ b/windhoek/partitions/efi.c
@@ -0,0 +1,631 @@
+/************************************************************
+ * EFI GUID Partition Table handling
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
+ * efi.[ch] by Matt Domsch <Matt_Domsch@dell.com>
+ * Copyright 2000,2001,2002,2004 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ *
+ * TODO:
+ *
+ * Changelog:
+ * Mon Nov 09 2004 Matt Domsch <Matt_Domsch@dell.com>
+ * - test for valid PMBR and valid PGPT before ever reading
+ * AGPT, allow override with 'gpt' kernel command line option.
+ * - check for first/last_usable_lba outside of size of disk
+ *
+ * Tue Mar 26 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.7-pre1 and 2.5.7-dj2
+ * - Applied patch to avoid fault in alternate header handling
+ * - cleaned up find_valid_gpt
+ * - On-disk structure and copy in memory is *always* LE now -
+ * swab fields as needed
+ * - remove print_gpt_header()
+ * - only use first max_p partition entries, to keep the kernel minor number
+ * and partition numbers tied.
+ *
+ * Mon Feb 04 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed __PRIPTR_PREFIX - not being used
+ *
+ * Mon Jan 14 2002 Matt Domsch <Matt_Domsch@dell.com>
+ * - Ported to 2.5.2-pre11 + library crc32 patch Linus applied
+ *
+ * Thu Dec 6 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Added compare_gpts().
+ * - moved le_efi_guid_to_cpus() back into this file. GPT is the only
+ * thing that keeps EFI GUIDs on disk.
+ * - Changed gpt structure names and members to be simpler and more Linux-like.
+ *
+ * Wed Oct 17 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Removed CONFIG_DEVFS_VOLUMES_UUID code entirely per Martin Wilck
+ *
+ * Wed Oct 10 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Changed function comments to DocBook style per Andreas Dilger suggestion.
+ *
+ * Mon Oct 08 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Change read_lba() to use the page cache per Al Viro's work.
+ * - print u64s properly on all architectures
+ * - fixed debug_printk(), now Dprintk()
+ *
+ * Mon Oct 01 2001 Matt Domsch <Matt_Domsch@dell.com>
+ * - Style cleanups
+ * - made most functions static
+ * - Endianness addition
+ * - remove test for second alternate header, as it's not per spec,
+ * and is unnecessary. There's now a method to read/write the last
+ * sector of an odd-sized disk from user space. No tools have ever
+ * been released which used this code, so it's effectively dead.
+ * - Per Asit Mallick of Intel, added a test for a valid PMBR.
+ * - Added kernel command line option 'gpt' to override valid PMBR test.
+ *
+ * Wed Jun 6 2001 Martin Wilck <Martin.Wilck@Fujitsu-Siemens.com>
+ * - added devfs volume UUID support (/dev/volumes/uuids) for
+ * mounting file systems by the partition GUID.
+ *
+ * Tue Dec 5 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Moved crc32() to linux/lib, added efi_crc32().
+ *
+ * Thu Nov 30 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Replaced Intel's CRC32 function with an equivalent
+ * non-license-restricted version.
+ *
+ * Wed Oct 25 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Fixed the last_lba() call to return the proper last block
+ *
+ * Thu Oct 12 2000 Matt Domsch <Matt_Domsch@dell.com>
+ * - Thanks to Andries Brouwer for his debugging assistance.
+ * - Code works, detects all the partitions.
+ *
+ ************************************************************/
+#include <linux/crc32.h>
+#include "check.h"
+#include "efi.h"
+
+/* This allows a kernel command line option 'gpt' to override
+ * the test for invalid PMBR. Not __initdata because reloading
+ * the partition tables happens after init too.
+ */
+static int force_gpt;
+static int __init
+force_gpt_fn(char *str)
+{
+ force_gpt = 1;
+ return 1;
+}
+__setup("gpt", force_gpt_fn);
+
+
+/**
+ * efi_crc32() - EFI version of crc32 function
+ * @buf: buffer to calculate crc32 of
+ * @len: length of buf
+ *
+ * Description: Returns EFI-style CRC32 value for @buf
+ *
+ * This function uses the little endian Ethernet polynomial
+ * but seeds the function with ~0, and xor's with ~0 at the end.
+ * Note, the EFI Specification, v1.02, has a reference to
+ * Dr. Dobbs Journal, May 1994 (actually it's in May 1992).
+ */
+static inline u32
+efi_crc32(const void *buf, unsigned long len)
+{
+ return (crc32(~0L, buf, len) ^ ~0L);
+}
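+
+/*
+ * Sketch for illustration only (compiled out): verifying a header CRC
+ * the way is_gpt_valid() does further below - zero the stored CRC
+ * field, recompute over header_size bytes, then compare against the
+ * stored value and restore it.
+ */
+#if 0
+static int example_check_header_crc(gpt_header *gpt)
+{
+	u32 stored = le32_to_cpu(gpt->header_crc32);
+	u32 calc;
+
+	gpt->header_crc32 = 0;
+	calc = efi_crc32((const unsigned char *) gpt,
+			 le32_to_cpu(gpt->header_size));
+	gpt->header_crc32 = cpu_to_le32(stored);	/* put it back */
+	return calc == stored;
+}
+#endif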
+
+/**
+ * last_lba(): return number of last logical block of device
+ * @bdev: block device
+ *
+ * Description: Returns last LBA value on success, 0 on error.
+ * This is stored (by sd and ide-geometry) in
+ * the part[0] entry for this disk, and is the number of
+ * physical sectors available on the disk.
+ */
+static u64
+last_lba(struct block_device *bdev)
+{
+ if (!bdev || !bdev->bd_inode)
+ return 0;
+ return (bdev->bd_inode->i_size >> 9) - 1ULL;
+}
+
+static inline int
+pmbr_part_valid(struct partition *part)
+{
+ if (part->sys_ind == EFI_PMBR_OSTYPE_EFI_GPT &&
+ le32_to_cpu(part->start_sect) == 1UL)
+ return 1;
+ return 0;
+}
+
+/**
+ * is_pmbr_valid(): test Protective MBR for validity
+ * @mbr: pointer to a legacy mbr structure
+ *
+ * Description: Returns 1 if PMBR is valid, 0 otherwise.
+ * Validity depends on two things:
+ * 1) MSDOS signature is in the last two bytes of the MBR
+ * 2) One partition of type 0xEE is found
+ */
+static int
+is_pmbr_valid(legacy_mbr *mbr)
+{
+ int i;
+ if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE)
+ return 0;
+ for (i = 0; i < 4; i++)
+ if (pmbr_part_valid(&mbr->partition_record[i]))
+ return 1;
+ return 0;
+}
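+
+/*
+ * Compiled-out example, not part of the original driver: the minimal
+ * protective MBR that is_pmbr_valid() accepts - one entry of type 0xEE
+ * starting at LBA 1, plus the 0xAA55 signature in the last two bytes
+ * of the sector.
+ */
+#if 0
+static void example_fill_pmbr(legacy_mbr *mbr)
+{
+	memset(mbr, 0, sizeof(*mbr));
+	mbr->partition_record[0].sys_ind = EFI_PMBR_OSTYPE_EFI_GPT;
+	mbr->partition_record[0].start_sect = cpu_to_le32(1);
+	mbr->signature = cpu_to_le16(MSDOS_MBR_SIGNATURE);
+	/* is_pmbr_valid(mbr) now returns 1 */
+}
+#endif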
+
+/**
+ * read_lba(): Read bytes from disk, starting at given LBA
+ * @bdev: block device to read from
+ * @lba: starting logical block address
+ * @buffer: destination buffer
+ * @count: number of bytes to read
+ *
+ * Description: Reads @count bytes from @bdev into @buffer.
+ * Returns number of bytes read on success, 0 on error.
+ */
+static size_t
+read_lba(struct block_device *bdev, u64 lba, u8 * buffer, size_t count)
+{
+ size_t totalreadcount = 0;
+
+ if (!bdev || !buffer || lba > last_lba(bdev))
+ return 0;
+
+ while (count) {
+ int copied = 512;
+ Sector sect;
+ unsigned char *data = read_dev_sector(bdev, lba++, &sect);
+ if (!data)
+ break;
+ if (copied > count)
+ copied = count;
+ memcpy(buffer, data, copied);
+ put_dev_sector(sect);
+ buffer += copied;
+ totalreadcount +=copied;
+ count -= copied;
+ }
+ return totalreadcount;
+}
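+
+/*
+ * Compiled-out sketch, added for illustration: read_lba() returns the
+ * number of bytes actually copied, so callers must treat a short read
+ * as failure, exactly as alloc_read_gpt_header() does below. The
+ * single-block helper here is hypothetical.
+ */
+#if 0
+static int example_read_one_block(struct block_device *bdev, u64 lba, u8 *buf)
+{
+	if (read_lba(bdev, lba, buf, GPT_BLOCK_SIZE) < GPT_BLOCK_SIZE)
+		return -EIO;	/* short read: past end of disk or I/O error */
+	return 0;
+}
+#endif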
+
+/**
+ * alloc_read_gpt_entries(): reads partition entries from disk
+ * @bdev
+ * @gpt - GPT header
+ *
+ * Description: Returns ptes on success, NULL on error.
+ * Allocates space for PTEs based on information found in @gpt.
+ * Notes: remember to free pte when you're done!
+ */
+static gpt_entry *
+alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
+{
+ size_t count;
+ gpt_entry *pte;
+ if (!bdev || !gpt)
+ return NULL;
+
+ count = le32_to_cpu(gpt->num_partition_entries) *
+ le32_to_cpu(gpt->sizeof_partition_entry);
+ if (!count)
+ return NULL;
+ pte = kzalloc(count, GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
+ (u8 *) pte,
+ count) < count) {
+ kfree(pte);
+ pte=NULL;
+ return NULL;
+ }
+ return pte;
+}
+
+/**
+ * alloc_read_gpt_header(): Allocates GPT header, reads into it from disk
+ * @bdev
+ * @lba is the Logical Block Address of the partition table
+ *
+ * Description: returns GPT header on success, NULL on error. Allocates
+ * and fills a GPT header starting at @lba from @bdev.
+ * Note: remember to free gpt when finished with it.
+ */
+static gpt_header *
+alloc_read_gpt_header(struct block_device *bdev, u64 lba)
+{
+ gpt_header *gpt;
+ if (!bdev)
+ return NULL;
+
+ gpt = kzalloc(sizeof (gpt_header), GFP_KERNEL);
+ if (!gpt)
+ return NULL;
+
+ if (read_lba(bdev, lba, (u8 *) gpt,
+ sizeof (gpt_header)) < sizeof (gpt_header)) {
+ kfree(gpt);
+ gpt=NULL;
+ return NULL;
+ }
+
+ return gpt;
+}
+
+/**
+ * is_gpt_valid() - tests one GPT header and PTEs for validity
+ * @bdev
+ * @lba is the logical block address of the GPT header to test
+ * @gpt is a GPT header ptr, filled on return.
+ * @ptes is a PTEs ptr, filled on return.
+ *
+ * Description: returns 1 if valid, 0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ */
+static int
+is_gpt_valid(struct block_device *bdev, u64 lba,
+ gpt_header **gpt, gpt_entry **ptes)
+{
+ u32 crc, origcrc;
+ u64 lastlba;
+
+ if (!bdev || !gpt || !ptes)
+ return 0;
+ if (!(*gpt = alloc_read_gpt_header(bdev, lba)))
+ return 0;
+
+ /* Check the GUID Partition Table signature */
+ if (le64_to_cpu((*gpt)->signature) != GPT_HEADER_SIGNATURE) {
+ pr_debug("GUID Partition Table Header signature is wrong:"
+ "%lld != %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->signature),
+ (unsigned long long)GPT_HEADER_SIGNATURE);
+ goto fail;
+ }
+
+ /* Check the GUID Partition Table CRC */
+ origcrc = le32_to_cpu((*gpt)->header_crc32);
+ (*gpt)->header_crc32 = 0;
+ crc = efi_crc32((const unsigned char *) (*gpt), le32_to_cpu((*gpt)->header_size));
+
+ if (crc != origcrc) {
+ pr_debug("GUID Partition Table Header CRC is wrong: %x != %x\n",
+ crc, origcrc);
+ goto fail;
+ }
+ (*gpt)->header_crc32 = cpu_to_le32(origcrc);
+
+ /* Check that the my_lba entry points to the LBA that contains
+ * the GUID Partition Table */
+ if (le64_to_cpu((*gpt)->my_lba) != lba) {
+ pr_debug("GPT my_lba incorrect: %lld != %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->my_lba),
+ (unsigned long long)lba);
+ goto fail;
+ }
+
+ /* Check the first_usable_lba and last_usable_lba are
+ * within the disk.
+ */
+ lastlba = last_lba(bdev);
+ if (le64_to_cpu((*gpt)->first_usable_lba) > lastlba) {
+ pr_debug("GPT: first_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->first_usable_lba),
+ (unsigned long long)lastlba);
+ goto fail;
+ }
+ if (le64_to_cpu((*gpt)->last_usable_lba) > lastlba) {
+ pr_debug("GPT: last_usable_lba incorrect: %lld > %lld\n",
+ (unsigned long long)le64_to_cpu((*gpt)->last_usable_lba),
+ (unsigned long long)lastlba);
+ goto fail;
+ }
+
+ if (!(*ptes = alloc_read_gpt_entries(bdev, *gpt)))
+ goto fail;
+
+ /* Check the GUID Partition Entry Array CRC */
+ crc = efi_crc32((const unsigned char *) (*ptes),
+ le32_to_cpu((*gpt)->num_partition_entries) *
+ le32_to_cpu((*gpt)->sizeof_partition_entry));
+
+ if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
+		pr_debug("GUID Partition Entry Array CRC check failed.\n");
+ goto fail_ptes;
+ }
+
+ /* We're done, all's well */
+ return 1;
+
+ fail_ptes:
+ kfree(*ptes);
+ *ptes = NULL;
+ fail:
+ kfree(*gpt);
+ *gpt = NULL;
+ return 0;
+}
+
+/**
+ * is_pte_valid() - tests one PTE for validity
+ * @pte is the pte to check
+ * @lastlba is last lba of the disk
+ *
+ * Description: returns 1 if valid, 0 on error.
+ */
+static inline int
+is_pte_valid(const gpt_entry *pte, const u64 lastlba)
+{
+ if ((!efi_guidcmp(pte->partition_type_guid, NULL_GUID)) ||
+ le64_to_cpu(pte->starting_lba) > lastlba ||
+ le64_to_cpu(pte->ending_lba) > lastlba)
+ return 0;
+ return 1;
+}
+
+/**
+ * compare_gpts() - Search disk for valid GPT headers and PTEs
+ * @pgpt is the primary GPT header
+ * @agpt is the alternate GPT header
+ * @lastlba is the last LBA number
+ * Description: Returns nothing. Sanity checks pgpt and agpt fields
+ * and prints warnings on discrepancies.
+ *
+ */
+static void
+compare_gpts(gpt_header *pgpt, gpt_header *agpt, u64 lastlba)
+{
+ int error_found = 0;
+ if (!pgpt || !agpt)
+ return;
+ if (le64_to_cpu(pgpt->my_lba) != le64_to_cpu(agpt->alternate_lba)) {
+ printk(KERN_WARNING
+ "GPT:Primary header LBA != Alt. header alternate_lba\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->my_lba),
+ (unsigned long long)le64_to_cpu(agpt->alternate_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->alternate_lba) != le64_to_cpu(agpt->my_lba)) {
+ printk(KERN_WARNING
+ "GPT:Primary header alternate_lba != Alt. header my_lba\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+ (unsigned long long)le64_to_cpu(agpt->my_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->first_usable_lba) !=
+ le64_to_cpu(agpt->first_usable_lba)) {
+ printk(KERN_WARNING "GPT:first_usable_lbas don't match.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->first_usable_lba),
+ (unsigned long long)le64_to_cpu(agpt->first_usable_lba));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->last_usable_lba) !=
+ le64_to_cpu(agpt->last_usable_lba)) {
+ printk(KERN_WARNING "GPT:last_usable_lbas don't match.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->last_usable_lba),
+ (unsigned long long)le64_to_cpu(agpt->last_usable_lba));
+ error_found++;
+ }
+ if (efi_guidcmp(pgpt->disk_guid, agpt->disk_guid)) {
+ printk(KERN_WARNING "GPT:disk_guids don't match.\n");
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->num_partition_entries) !=
+ le32_to_cpu(agpt->num_partition_entries)) {
+ printk(KERN_WARNING "GPT:num_partition_entries don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->num_partition_entries),
+ le32_to_cpu(agpt->num_partition_entries));
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->sizeof_partition_entry) !=
+ le32_to_cpu(agpt->sizeof_partition_entry)) {
+ printk(KERN_WARNING
+ "GPT:sizeof_partition_entry values don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->sizeof_partition_entry),
+ le32_to_cpu(agpt->sizeof_partition_entry));
+ error_found++;
+ }
+ if (le32_to_cpu(pgpt->partition_entry_array_crc32) !=
+ le32_to_cpu(agpt->partition_entry_array_crc32)) {
+ printk(KERN_WARNING
+ "GPT:partition_entry_array_crc32 values don't match: "
+ "0x%x != 0x%x\n",
+ le32_to_cpu(pgpt->partition_entry_array_crc32),
+ le32_to_cpu(agpt->partition_entry_array_crc32));
+ error_found++;
+ }
+ if (le64_to_cpu(pgpt->alternate_lba) != lastlba) {
+ printk(KERN_WARNING
+ "GPT:Primary header thinks Alt. header is not at the end of the disk.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(pgpt->alternate_lba),
+ (unsigned long long)lastlba);
+ error_found++;
+ }
+
+ if (le64_to_cpu(agpt->my_lba) != lastlba) {
+ printk(KERN_WARNING
+ "GPT:Alternate GPT header not at the end of the disk.\n");
+ printk(KERN_WARNING "GPT:%lld != %lld\n",
+ (unsigned long long)le64_to_cpu(agpt->my_lba),
+ (unsigned long long)lastlba);
+ error_found++;
+ }
+
+ if (error_found)
+ printk(KERN_WARNING
+ "GPT: Use GNU Parted to correct GPT errors.\n");
+ return;
+}
+
+/**
+ * find_valid_gpt() - Search disk for valid GPT headers and PTEs
+ * @bdev
+ * @gpt is a GPT header ptr, filled on return.
+ * @ptes is a PTEs ptr, filled on return.
+ * Description: Returns 1 if valid, 0 on error.
+ * If valid, returns pointers to newly allocated GPT header and PTEs.
+ * Validity depends on PMBR being valid (or being overridden by the
+ * 'gpt' kernel command line option) and finding either the Primary
+ * GPT header and PTEs valid, or the Alternate GPT header and PTEs
+ * valid. If the Primary GPT header is not valid, the Alternate GPT header
+ * is not checked unless the 'gpt' kernel command line option is passed.
+ * This protects against devices which misreport their size, and forces
+ * the user to decide to use the Alternate GPT.
+ */
+static int
+find_valid_gpt(struct block_device *bdev, gpt_header **gpt, gpt_entry **ptes)
+{
+ int good_pgpt = 0, good_agpt = 0, good_pmbr = 0;
+ gpt_header *pgpt = NULL, *agpt = NULL;
+ gpt_entry *pptes = NULL, *aptes = NULL;
+ legacy_mbr *legacymbr;
+ u64 lastlba;
+ if (!bdev || !gpt || !ptes)
+ return 0;
+
+ lastlba = last_lba(bdev);
+ if (!force_gpt) {
+ /* This will be added to the EFI Spec. per Intel after v1.02. */
+ legacymbr = kzalloc(sizeof (*legacymbr), GFP_KERNEL);
+ if (legacymbr) {
+ read_lba(bdev, 0, (u8 *) legacymbr,
+ sizeof (*legacymbr));
+ good_pmbr = is_pmbr_valid(legacymbr);
+ kfree(legacymbr);
+ }
+ if (!good_pmbr)
+ goto fail;
+ }
+
+ good_pgpt = is_gpt_valid(bdev, GPT_PRIMARY_PARTITION_TABLE_LBA,
+ &pgpt, &pptes);
+ if (good_pgpt)
+ good_agpt = is_gpt_valid(bdev,
+ le64_to_cpu(pgpt->alternate_lba),
+ &agpt, &aptes);
+ if (!good_agpt && force_gpt)
+ good_agpt = is_gpt_valid(bdev, lastlba,
+ &agpt, &aptes);
+
+ /* The obviously unsuccessful case */
+ if (!good_pgpt && !good_agpt)
+ goto fail;
+
+ compare_gpts(pgpt, agpt, lastlba);
+
+ /* The good cases */
+ if (good_pgpt) {
+ *gpt = pgpt;
+ *ptes = pptes;
+ kfree(agpt);
+ kfree(aptes);
+ if (!good_agpt) {
+ printk(KERN_WARNING
+ "Alternate GPT is invalid, "
+ "using primary GPT.\n");
+ }
+ return 1;
+ }
+ else if (good_agpt) {
+ *gpt = agpt;
+ *ptes = aptes;
+ kfree(pgpt);
+ kfree(pptes);
+ printk(KERN_WARNING
+ "Primary GPT is invalid, using alternate GPT.\n");
+ return 1;
+ }
+
+ fail:
+ kfree(pgpt);
+ kfree(agpt);
+ kfree(pptes);
+ kfree(aptes);
+ *gpt = NULL;
+ *ptes = NULL;
+ return 0;
+}
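+
+/*
+ * Compiled-out usage sketch, not from the original file: the caller of
+ * find_valid_gpt() owns the returned header and entry array and must
+ * kfree() both, as efi_partition() does below.
+ */
+#if 0
+static void example_use_find_valid_gpt(struct block_device *bdev)
+{
+	gpt_header *gpt = NULL;
+	gpt_entry *ptes = NULL;
+
+	if (find_valid_gpt(bdev, &gpt, &ptes)) {
+		/* walk le32_to_cpu(gpt->num_partition_entries) entries */
+		kfree(ptes);
+		kfree(gpt);
+	}
+}
+#endif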
+
+/**
+ * efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+ * @state
+ * @bdev
+ *
+ * Description: called from check.c, if the disk contains GPT
+ * partitions, sets up partition entries in the kernel.
+ *
+ * If the first block on the disk is a legacy MBR,
+ * it will get handled by msdos_partition().
+ * If it's a Protective MBR, we'll handle it here.
+ *
+ * We do not create a Linux partition for GPT, but
+ * only for the actual data partitions.
+ * Returns:
+ * -1 if unable to read the partition table
+ * 0 if this isn't our partition table
+ * 1 if successful
+ *
+ */
+int
+efi_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ gpt_header *gpt = NULL;
+ gpt_entry *ptes = NULL;
+ u32 i;
+
+ if (!find_valid_gpt(bdev, &gpt, &ptes) || !gpt || !ptes) {
+ kfree(gpt);
+ kfree(ptes);
+ return 0;
+ }
+
+ pr_debug("GUID Partition Table is valid! Yea!\n");
+
+ for (i = 0; i < le32_to_cpu(gpt->num_partition_entries) && i < state->limit-1; i++) {
+ if (!is_pte_valid(&ptes[i], last_lba(bdev)))
+ continue;
+
+ put_partition(state, i+1, le64_to_cpu(ptes[i].starting_lba),
+ (le64_to_cpu(ptes[i].ending_lba) -
+ le64_to_cpu(ptes[i].starting_lba) +
+ 1ULL));
+
+ /* If this is a RAID volume, tell md */
+ if (!efi_guidcmp(ptes[i].partition_type_guid,
+ PARTITION_LINUX_RAID_GUID))
+ state->parts[i+1].flags = 1;
+ }
+ kfree(ptes);
+ kfree(gpt);
+ printk("\n");
+ return 1;
+}
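+
+/*
+ * Compiled-out sketch of how a generic caller would interpret the
+ * return-code convention documented above. The real dispatch loop lives
+ * in check.c and is not reproduced here; this caller is only an
+ * assumption made for illustration.
+ */
+#if 0
+static void example_call_efi_partition(struct parsed_partitions *state,
+				       struct block_device *bdev)
+{
+	int res = efi_partition(state, bdev);
+
+	if (res < 0)
+		printk(" unable to read GPT\n");	/* I/O error */
+	else if (res == 0)
+		;	/* not a GPT disk, let another parser try */
+	else
+		;	/* partitions were recorded via put_partition() */
+}
+#endif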
diff --git a/windhoek/partitions/efi.h b/windhoek/partitions/efi.h
new file mode 100644
index 00000000..2cc89d04
--- /dev/null
+++ b/windhoek/partitions/efi.h
@@ -0,0 +1,130 @@
+/************************************************************
+ * EFI GUID Partition Table
+ * Per Intel EFI Specification v1.02
+ * http://developer.intel.com/technology/efi/efi.htm
+ *
+ * By Matt Domsch <Matt_Domsch@dell.com> Fri Sep 22 22:15:56 CDT 2000
+ * Copyright 2000,2001 Dell Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ ************************************************************/
+
+#ifndef FS_PART_EFI_H_INCLUDED
+#define FS_PART_EFI_H_INCLUDED
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/efi.h>
+
+#define MSDOS_MBR_SIGNATURE 0xaa55
+#define EFI_PMBR_OSTYPE_EFI 0xEF
+#define EFI_PMBR_OSTYPE_EFI_GPT 0xEE
+
+#define GPT_BLOCK_SIZE 512
+#define GPT_HEADER_SIGNATURE 0x5452415020494645ULL
+#define GPT_HEADER_REVISION_V1 0x00010000
+#define GPT_PRIMARY_PARTITION_TABLE_LBA 1
+
+#define PARTITION_SYSTEM_GUID \
+ EFI_GUID( 0xC12A7328, 0xF81F, 0x11d2, \
+ 0xBA, 0x4B, 0x00, 0xA0, 0xC9, 0x3E, 0xC9, 0x3B)
+#define LEGACY_MBR_PARTITION_GUID \
+ EFI_GUID( 0x024DEE41, 0x33E7, 0x11d3, \
+ 0x9D, 0x69, 0x00, 0x08, 0xC7, 0x81, 0xF3, 0x9F)
+#define PARTITION_MSFT_RESERVED_GUID \
+ EFI_GUID( 0xE3C9E316, 0x0B5C, 0x4DB8, \
+ 0x81, 0x7D, 0xF9, 0x2D, 0xF0, 0x02, 0x15, 0xAE)
+#define PARTITION_BASIC_DATA_GUID \
+ EFI_GUID( 0xEBD0A0A2, 0xB9E5, 0x4433, \
+ 0x87, 0xC0, 0x68, 0xB6, 0xB7, 0x26, 0x99, 0xC7)
+#define PARTITION_LINUX_RAID_GUID \
+ EFI_GUID( 0xa19d880f, 0x05fc, 0x4d3b, \
+ 0xa0, 0x06, 0x74, 0x3f, 0x0f, 0x84, 0x91, 0x1e)
+#define PARTITION_LINUX_SWAP_GUID \
+ EFI_GUID( 0x0657fd6d, 0xa4ab, 0x43c4, \
+ 0x84, 0xe5, 0x09, 0x33, 0xc8, 0x4b, 0x4f, 0x4f)
+#define PARTITION_LINUX_LVM_GUID \
+ EFI_GUID( 0xe6d6d379, 0xf507, 0x44c2, \
+ 0xa2, 0x3c, 0x23, 0x8f, 0x2a, 0x3d, 0xf9, 0x28)
+
+typedef struct _gpt_header {
+ __le64 signature;
+ __le32 revision;
+ __le32 header_size;
+ __le32 header_crc32;
+ __le32 reserved1;
+ __le64 my_lba;
+ __le64 alternate_lba;
+ __le64 first_usable_lba;
+ __le64 last_usable_lba;
+ efi_guid_t disk_guid;
+ __le64 partition_entry_lba;
+ __le32 num_partition_entries;
+ __le32 sizeof_partition_entry;
+ __le32 partition_entry_array_crc32;
+ u8 reserved2[GPT_BLOCK_SIZE - 92];
+} __attribute__ ((packed)) gpt_header;
+
+typedef struct _gpt_entry_attributes {
+ u64 required_to_function:1;
+ u64 reserved:47;
+ u64 type_guid_specific:16;
+} __attribute__ ((packed)) gpt_entry_attributes;
+
+typedef struct _gpt_entry {
+ efi_guid_t partition_type_guid;
+ efi_guid_t unique_partition_guid;
+ __le64 starting_lba;
+ __le64 ending_lba;
+ gpt_entry_attributes attributes;
+ efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
+} __attribute__ ((packed)) gpt_entry;
+
+typedef struct _legacy_mbr {
+ u8 boot_code[440];
+ __le32 unique_mbr_signature;
+ __le16 unknown;
+ struct partition partition_record[4];
+ __le16 signature;
+} __attribute__ ((packed)) legacy_mbr;
+
+/* Functions */
+extern int efi_partition(struct parsed_partitions *state, struct block_device *bdev);
+
+#endif
+
+/*
+ * Overrides for Emacs so that we follow Linus's tabbing style.
+ * Emacs will notice this stuff at the end of the file and automatically
+ * adjust the settings for this buffer only. This must remain at the end
+ * of the file.
+ * --------------------------------------------------------------------------
+ * Local variables:
+ * c-indent-level: 4
+ * c-brace-imaginary-offset: 0
+ * c-brace-offset: -4
+ * c-argdecl-indent: 4
+ * c-label-offset: -4
+ * c-continued-statement-offset: 4
+ * c-continued-brace-offset: 0
+ * indent-tabs-mode: nil
+ * tab-width: 8
+ * End:
+ */
diff --git a/windhoek/partitions/ibm.c b/windhoek/partitions/ibm.c
new file mode 100644
index 00000000..1e064c4a
--- /dev/null
+++ b/windhoek/partitions/ibm.c
@@ -0,0 +1,220 @@
+/*
+ * File...........: linux/fs/partitions/ibm.c
+ * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ * Bugreports.to..: <Linux390@de.ibm.com>
+ * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/hdreg.h>
+#include <linux/slab.h>
+#include <asm/dasd.h>
+#include <asm/ebcdic.h>
+#include <asm/uaccess.h>
+#include <asm/vtoc.h>
+
+#include "check.h"
+#include "ibm.h"
+
+/*
+ * compute the block number from a
+ * cyl-cyl-head-head structure
+ */
+static inline int
+cchh2blk (struct vtoc_cchh *ptr, struct hd_geometry *geo) {
+ return ptr->cc * geo->heads * geo->sectors +
+ ptr->hh * geo->sectors;
+}
+
+/*
+ * compute the block number from a
+ * cyl-cyl-head-head-block structure
+ */
+static inline int
+cchhb2blk (struct vtoc_cchhb *ptr, struct hd_geometry *geo) {
+ return ptr->cc * geo->heads * geo->sectors +
+ ptr->hh * geo->sectors +
+ ptr->b;
+}
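+
+/*
+ * Worked example, added for illustration: with geo->heads = 15 and
+ * geo->sectors = 12, a cyl-cyl-head-head value of cc = 2, hh = 3 maps
+ * to block 2*15*12 + 3*12 = 396; cchhb2blk() additionally adds the
+ * block offset b within that track.
+ */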
+
+/*
+ */
+int
+ibm_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int blocksize, offset, size,res;
+ loff_t i_size;
+ dasd_information2_t *info;
+ struct hd_geometry *geo;
+ char type[5] = {0,};
+ char name[7] = {0,};
+ union label_t {
+ struct vtoc_volume_label vol;
+ struct vtoc_cms_label cms;
+ } *label;
+ unsigned char *data;
+ Sector sect;
+
+ res = 0;
+ blocksize = bdev_hardsect_size(bdev);
+ if (blocksize <= 0)
+ goto out_exit;
+ i_size = i_size_read(bdev->bd_inode);
+ if (i_size == 0)
+ goto out_exit;
+
+ info = kmalloc(sizeof(dasd_information2_t), GFP_KERNEL);
+ if (info == NULL)
+ goto out_exit;
+ geo = kmalloc(sizeof(struct hd_geometry), GFP_KERNEL);
+ if (geo == NULL)
+ goto out_nogeo;
+ label = kmalloc(sizeof(union label_t), GFP_KERNEL);
+ if (label == NULL)
+ goto out_nolab;
+
+ if (ioctl_by_bdev(bdev, BIODASDINFO2, (unsigned long)info) != 0 ||
+ ioctl_by_bdev(bdev, HDIO_GETGEO, (unsigned long)geo) != 0)
+ goto out_freeall;
+
+ /*
+ * Get volume label, extract name and type.
+ */
+ data = read_dev_sector(bdev, info->label_block*(blocksize/512), &sect);
+ if (data == NULL)
+ goto out_readerr;
+
+ strncpy (type, data, 4);
+ if ((!info->FBA_layout) && (!strcmp(info->type, "ECKD")))
+ strncpy(name, data + 8, 6);
+ else
+ strncpy(name, data + 4, 6);
+ memcpy(label, data, sizeof(union label_t));
+ put_dev_sector(sect);
+
+ EBCASC(type, 4);
+ EBCASC(name, 6);
+
+ res = 1;
+
+ /*
+	 * Three different formats: LDL, CDL and unformatted disk,
+	 *
+	 * identified by info->format.
+	 *
+	 * We do not have to care about unformatted disks.
+ */
+ if (info->format == DASD_FORMAT_LDL) {
+ if (strncmp(type, "CMS1", 4) == 0) {
+ /*
+ * VM style CMS1 labeled disk
+ */
+ if (label->cms.disk_offset != 0) {
+ printk("CMS1/%8s(MDSK):", name);
+ /* disk is reserved minidisk */
+ blocksize = label->cms.block_size;
+ offset = label->cms.disk_offset;
+ size = (label->cms.block_count - 1)
+ * (blocksize >> 9);
+ } else {
+ printk("CMS1/%8s:", name);
+ offset = (info->label_block + 1);
+ size = i_size >> 9;
+ }
+ } else {
+ /*
+ * Old style LNX1 or unlabeled disk
+ */
+ if (strncmp(type, "LNX1", 4) == 0)
+ printk ("LNX1/%8s:", name);
+ else
+ printk("(nonl)");
+ offset = (info->label_block + 1);
+ size = i_size >> 9;
+ }
+ put_partition(state, 1, offset*(blocksize >> 9),
+ size-offset*(blocksize >> 9));
+ } else if (info->format == DASD_FORMAT_CDL) {
+ /*
+ * New style CDL formatted disk
+ */
+ unsigned int blk;
+ int counter;
+
+ /*
+ * check if VOL1 label is available
+ * if not, something is wrong, skipping partition detection
+ */
+ if (strncmp(type, "VOL1", 4) == 0) {
+ printk("VOL1/%8s:", name);
+ /*
+ * get block number and read then go through format1
+ * labels
+ */
+ blk = cchhb2blk(&label->vol.vtoc, geo) + 1;
+ counter = 0;
+ data = read_dev_sector(bdev, blk * (blocksize/512),
+ &sect);
+ while (data != NULL) {
+ struct vtoc_format1_label f1;
+
+ memcpy(&f1, data,
+ sizeof(struct vtoc_format1_label));
+ put_dev_sector(sect);
+
+ /* skip FMT4 / FMT5 / FMT7 labels */
+ if (f1.DS1FMTID == _ascebc['4']
+ || f1.DS1FMTID == _ascebc['5']
+ || f1.DS1FMTID == _ascebc['7']) {
+ blk++;
+ data = read_dev_sector(bdev, blk *
+ (blocksize/512),
+ &sect);
+ continue;
+ }
+
+ /* only FMT1 valid at this point */
+ if (f1.DS1FMTID != _ascebc['1'])
+ break;
+
+ /* OK, we got valid partition data */
+ offset = cchh2blk(&f1.DS1EXT1.llimit, geo);
+ size = cchh2blk(&f1.DS1EXT1.ulimit, geo) -
+ offset + geo->sectors;
+ if (counter >= state->limit)
+ break;
+ put_partition(state, counter + 1,
+ offset * (blocksize >> 9),
+ size * (blocksize >> 9));
+ counter++;
+ blk++;
+ data = read_dev_sector(bdev,
+ blk * (blocksize/512),
+ &sect);
+ }
+
+ if (!data)
+ /* Are we not supposed to report this ? */
+ goto out_readerr;
+ } else
+ printk(KERN_WARNING "Warning, expected Label VOL1 not "
+			printk(KERN_WARNING "Warning, expected label VOL1 not "
+			       "found, treating as CDL formatted disk");
+ }
+
+ printk("\n");
+ goto out_freeall;
+
+
+out_readerr:
+ res = -1;
+out_freeall:
+ kfree(label);
+out_nolab:
+ kfree(geo);
+out_nogeo:
+ kfree(info);
+out_exit:
+ return res;
+}
diff --git a/windhoek/partitions/ibm.h b/windhoek/partitions/ibm.h
new file mode 100644
index 00000000..31f85a6a
--- /dev/null
+++ b/windhoek/partitions/ibm.h
@@ -0,0 +1 @@
+int ibm_partition(struct parsed_partitions *, struct block_device *);
diff --git a/windhoek/partitions/karma.c b/windhoek/partitions/karma.c
new file mode 100644
index 00000000..176d89bc
--- /dev/null
+++ b/windhoek/partitions/karma.c
@@ -0,0 +1,57 @@
+/*
+ * fs/partitions/karma.c
+ * Rio Karma partition info.
+ *
+ * Copyright (C) 2006 Bob Copeland (me@bobcopeland.com)
+ * based on osf.c
+ */
+
+#include "check.h"
+#include "karma.h"
+
+int karma_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i;
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ struct disklabel {
+ u8 d_reserved[270];
+ struct d_partition {
+ __le32 p_res;
+ u8 p_fstype;
+ u8 p_res2[3];
+ __le32 p_offset;
+ __le32 p_size;
+ } d_partitions[2];
+ u8 d_blank[208];
+ __le16 d_magic;
+ } __attribute__((packed)) *label;
+ struct d_partition *p;
+
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct disklabel *)data;
+ if (le16_to_cpu(label->d_magic) != KARMA_LABEL_MAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ p = label->d_partitions;
+ for (i = 0 ; i < 2; i++, p++) {
+ if (slot == state->limit)
+ break;
+
+ if (p->p_fstype == 0x4d && le32_to_cpu(p->p_size)) {
+ put_partition(state, slot, le32_to_cpu(p->p_offset),
+ le32_to_cpu(p->p_size));
+ }
+ slot++;
+ }
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+}
+
diff --git a/windhoek/partitions/karma.h b/windhoek/partitions/karma.h
new file mode 100644
index 00000000..ecf7d3f2
--- /dev/null
+++ b/windhoek/partitions/karma.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/karma.h
+ */
+
+#define KARMA_LABEL_MAGIC 0xAB56
+
+int karma_partition(struct parsed_partitions *state, struct block_device *bdev);
+
diff --git a/windhoek/partitions/ldm.c b/windhoek/partitions/ldm.c
new file mode 100644
index 00000000..8652fb99
--- /dev/null
+++ b/windhoek/partitions/ldm.c
@@ -0,0 +1,1551 @@
+/**
+ * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program (in the main directory of the source in the file COPYING); if
+ * not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
+ * Boston, MA 02111-1307 USA
+ */
+
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/stringify.h>
+#include "ldm.h"
+#include "check.h"
+#include "msdos.h"
+
+/**
+ * ldm_debug/info/error/crit - Output an error message
+ * @f: A printf format string containing the message
+ * @...: Variables to substitute into @f
+ *
+ * ldm_debug() writes a DEBUG level message to the syslog but only if the
+ * driver was compiled with debug enabled. Otherwise, the call turns into a NOP.
+ */
+#ifndef CONFIG_LDM_DEBUG
+#define ldm_debug(...) do {} while (0)
+#else
+#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __func__, f, ##a)
+#endif
+
+#define ldm_crit(f, a...) _ldm_printk (KERN_CRIT, __func__, f, ##a)
+#define ldm_error(f, a...) _ldm_printk (KERN_ERR, __func__, f, ##a)
+#define ldm_info(f, a...) _ldm_printk (KERN_INFO, __func__, f, ##a)
+
+__attribute__ ((format (printf, 3, 4)))
+static void _ldm_printk (const char *level, const char *function,
+ const char *fmt, ...)
+{
+ static char buf[128];
+ va_list args;
+
+ va_start (args, fmt);
+ vsnprintf (buf, sizeof (buf), fmt, args);
+ va_end (args);
+
+ printk ("%s%s(): %s\n", level, function, buf);
+}
+
+/**
+ * ldm_parse_hexbyte - Convert an ASCII hex number to a byte
+ * @src: Pointer to at least 2 characters to convert.
+ *
+ * Convert a two character ASCII hex string to a number.
+ *
+ * Return: 0-255 Success, the byte was parsed correctly
+ * -1 Error, an invalid character was supplied
+ */
+static int ldm_parse_hexbyte (const u8 *src)
+{
+ unsigned int x; /* For correct wrapping */
+ int h;
+
+ /* high part */
+ if ((x = src[0] - '0') <= '9'-'0') h = x;
+ else if ((x = src[0] - 'a') <= 'f'-'a') h = x+10;
+ else if ((x = src[0] - 'A') <= 'F'-'A') h = x+10;
+ else return -1;
+ h <<= 4;
+
+ /* low part */
+ if ((x = src[1] - '0') <= '9'-'0') return h | x;
+ if ((x = src[1] - 'a') <= 'f'-'a') return h | (x+10);
+ if ((x = src[1] - 'A') <= 'F'-'A') return h | (x+10);
+ return -1;
+}
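+
+/*
+ * Worked example, added for illustration: ldm_parse_hexbyte("fa")
+ * returns 0xfa (250), while ldm_parse_hexbyte("4G") returns -1 because
+ * 'G' is not a hex digit. The unsigned subtraction makes characters
+ * below '0' wrap to large values, so they fail the range tests as well.
+ */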
+
+/**
+ * ldm_parse_guid - Convert GUID from ASCII to binary
+ * @src: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @dest: Memory block to hold binary GUID (16 bytes)
+ *
+ * N.B. The GUID need not be NULL terminated.
+ *
+ * Return: 'true' @dest contains binary GUID
+ * 'false' @dest contents are undefined
+ */
+static bool ldm_parse_guid (const u8 *src, u8 *dest)
+{
+ static const int size[] = { 4, 2, 2, 2, 6 };
+ int i, j, v;
+
+ if (src[8] != '-' || src[13] != '-' ||
+ src[18] != '-' || src[23] != '-')
+ return false;
+
+ for (j = 0; j < 5; j++, src++)
+ for (i = 0; i < size[j]; i++, src+=2, *dest++ = v)
+ if ((v = ldm_parse_hexbyte (src)) < 0)
+ return false;
+
+ return true;
+}
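+
+/*
+ * Worked example, added for illustration: for
+ * "fa50ff2b-f2e8-45de-83fa-65417f2f49ba" the group sizes {4,2,2,2,6}
+ * consume 8+4+4+4+12 hex digits, the four '-' separators are skipped by
+ * the src++ in the outer loop, and the 16 output bytes start with
+ * 0xfa 0x50 0xff 0x2b.
+ */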
+
+/**
+ * ldm_parse_privhead - Read the LDM Database PRIVHEAD structure
+ * @data: Raw database PRIVHEAD structure loaded from the device
+ * @ph: In-memory privhead structure in which to return parsed information
+ *
+ * This parses the LDM database PRIVHEAD structure supplied in @data and
+ * sets up the in-memory privhead structure @ph with the obtained information.
+ *
+ * Return: 'true' @ph contains the PRIVHEAD data
+ * 'false' @ph contents are undefined
+ */
+static bool ldm_parse_privhead(const u8 *data, struct privhead *ph)
+{
+ bool is_vista = false;
+
+ BUG_ON(!data || !ph);
+ if (MAGIC_PRIVHEAD != get_unaligned_be64(data)) {
+ ldm_error("Cannot find PRIVHEAD structure. LDM database is"
+ " corrupt. Aborting.");
+ return false;
+ }
+ ph->ver_major = get_unaligned_be16(data + 0x000C);
+ ph->ver_minor = get_unaligned_be16(data + 0x000E);
+ ph->logical_disk_start = get_unaligned_be64(data + 0x011B);
+ ph->logical_disk_size = get_unaligned_be64(data + 0x0123);
+ ph->config_start = get_unaligned_be64(data + 0x012B);
+ ph->config_size = get_unaligned_be64(data + 0x0133);
+ /* Version 2.11 is Win2k/XP and version 2.12 is Vista. */
+ if (ph->ver_major == 2 && ph->ver_minor == 12)
+ is_vista = true;
+ if (!is_vista && (ph->ver_major != 2 || ph->ver_minor != 11)) {
+ ldm_error("Expected PRIVHEAD version 2.11 or 2.12, got %d.%d."
+ " Aborting.", ph->ver_major, ph->ver_minor);
+ return false;
+ }
+ ldm_debug("PRIVHEAD version %d.%d (Windows %s).", ph->ver_major,
+ ph->ver_minor, is_vista ? "Vista" : "2000/XP");
+ if (ph->config_size != LDM_DB_SIZE) { /* 1 MiB in sectors. */
+ /* Warn the user and continue, carefully. */
+ ldm_info("Database is normally %u bytes, it claims to "
+ "be %llu bytes.", LDM_DB_SIZE,
+ (unsigned long long)ph->config_size);
+ }
+ if ((ph->logical_disk_size == 0) || (ph->logical_disk_start +
+ ph->logical_disk_size > ph->config_start)) {
+ ldm_error("PRIVHEAD disk size doesn't match real disk size");
+ return false;
+ }
+ if (!ldm_parse_guid(data + 0x0030, ph->disk_id)) {
+ ldm_error("PRIVHEAD contains an invalid GUID.");
+ return false;
+ }
+ ldm_debug("Parsed PRIVHEAD successfully.");
+ return true;
+}
+
+/**
+ * ldm_parse_tocblock - Read the LDM Database TOCBLOCK structure
+ * @data: Raw database TOCBLOCK structure loaded from the device
+ * @toc: In-memory toc structure in which to return parsed information
+ *
+ * This parses the LDM Database TOCBLOCK (table of contents) structure supplied
+ * in @data and sets up the in-memory tocblock structure @toc with the obtained
+ * information.
+ *
+ * N.B. The *_start and *_size values returned in @toc are not range-checked.
+ *
+ * Return: 'true' @toc contains the TOCBLOCK data
+ * 'false' @toc contents are undefined
+ */
+static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
+{
+ BUG_ON (!data || !toc);
+
+ if (MAGIC_TOCBLOCK != get_unaligned_be64(data)) {
+ ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
+ return false;
+ }
+ strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
+ toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+ toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
+ toc->bitmap1_size = get_unaligned_be64(data + 0x36);
+
+ if (strncmp (toc->bitmap1_name, TOC_BITMAP1,
+ sizeof (toc->bitmap1_name)) != 0) {
+ ldm_crit ("TOCBLOCK's first bitmap is '%s', should be '%s'.",
+				toc->bitmap1_name, TOC_BITMAP1);
+ return false;
+ }
+ strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
+ toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+ toc->bitmap2_start = get_unaligned_be64(data + 0x50);
+ toc->bitmap2_size = get_unaligned_be64(data + 0x58);
+ if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
+ sizeof (toc->bitmap2_name)) != 0) {
+ ldm_crit ("TOCBLOCK's second bitmap is '%s', should be '%s'.",
+				toc->bitmap2_name, TOC_BITMAP2);
+ return false;
+ }
+ ldm_debug ("Parsed TOCBLOCK successfully.");
+ return true;
+}
+
+/**
+ * ldm_parse_vmdb - Read the LDM Database VMDB structure
+ * @data: Raw database VMDB structure loaded from the device
+ * @vm: In-memory vmdb structure in which to return parsed information
+ *
+ * This parses the LDM Database VMDB structure supplied in @data and sets up
+ * the in-memory vmdb structure @vm with the obtained information.
+ *
+ * N.B. The *_start, *_size and *_seq values will be range-checked later.
+ *
+ * Return: 'true' @vm contains VMDB info
+ * 'false' @vm contents are undefined
+ */
+static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
+{
+ BUG_ON (!data || !vm);
+
+ if (MAGIC_VMDB != get_unaligned_be32(data)) {
+ ldm_crit ("Cannot find the VMDB, database may be corrupt.");
+ return false;
+ }
+
+ vm->ver_major = get_unaligned_be16(data + 0x12);
+ vm->ver_minor = get_unaligned_be16(data + 0x14);
+ if ((vm->ver_major != 4) || (vm->ver_minor != 10)) {
+ ldm_error ("Expected VMDB version %d.%d, got %d.%d. "
+ "Aborting.", 4, 10, vm->ver_major, vm->ver_minor);
+ return false;
+ }
+
+ vm->vblk_size = get_unaligned_be32(data + 0x08);
+ vm->vblk_offset = get_unaligned_be32(data + 0x0C);
+ vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
+
+ ldm_debug ("Parsed VMDB successfully.");
+ return true;
+}
+
+/**
+ * ldm_compare_privheads - Compare two privhead objects
+ * @ph1: First privhead
+ * @ph2: Second privhead
+ *
+ * This compares the two privhead structures @ph1 and @ph2.
+ *
+ * Return: 'true' Identical
+ * 'false' Different
+ */
+static bool ldm_compare_privheads (const struct privhead *ph1,
+ const struct privhead *ph2)
+{
+ BUG_ON (!ph1 || !ph2);
+
+ return ((ph1->ver_major == ph2->ver_major) &&
+ (ph1->ver_minor == ph2->ver_minor) &&
+ (ph1->logical_disk_start == ph2->logical_disk_start) &&
+ (ph1->logical_disk_size == ph2->logical_disk_size) &&
+ (ph1->config_start == ph2->config_start) &&
+ (ph1->config_size == ph2->config_size) &&
+ !memcmp (ph1->disk_id, ph2->disk_id, GUID_SIZE));
+}
+
+/**
+ * ldm_compare_tocblocks - Compare two tocblock objects
+ * @toc1: First toc
+ * @toc2: Second toc
+ *
+ * This compares the two tocblock structures @toc1 and @toc2.
+ *
+ * Return: 'true' Identical
+ * 'false' Different
+ */
+static bool ldm_compare_tocblocks (const struct tocblock *toc1,
+ const struct tocblock *toc2)
+{
+ BUG_ON (!toc1 || !toc2);
+
+ return ((toc1->bitmap1_start == toc2->bitmap1_start) &&
+ (toc1->bitmap1_size == toc2->bitmap1_size) &&
+ (toc1->bitmap2_start == toc2->bitmap2_start) &&
+ (toc1->bitmap2_size == toc2->bitmap2_size) &&
+ !strncmp (toc1->bitmap1_name, toc2->bitmap1_name,
+ sizeof (toc1->bitmap1_name)) &&
+ !strncmp (toc1->bitmap2_name, toc2->bitmap2_name,
+ sizeof (toc1->bitmap2_name)));
+}
+
+/**
+ * ldm_validate_privheads - Compare the primary privhead with its backups
+ * @bdev: Device holding the LDM Database
+ * @ph1: Memory struct to fill with ph contents
+ *
+ * Read and compare all three privheads from disk.
+ *
+ * The privheads on disk show the size and location of the main disk area and
+ * the configuration area (the database). The values are range-checked against
+ * the real size of @bdev.
+ *
+ * Return: 'true' Success
+ * 'false' Error
+ */
+static bool ldm_validate_privheads (struct block_device *bdev,
+ struct privhead *ph1)
+{
+ static const int off[3] = { OFF_PRIV1, OFF_PRIV2, OFF_PRIV3 };
+ struct privhead *ph[3] = { ph1 };
+ Sector sect;
+ u8 *data;
+ bool result = false;
+ long num_sects;
+ int i;
+
+ BUG_ON (!bdev || !ph1);
+
+ ph[1] = kmalloc (sizeof (*ph[1]), GFP_KERNEL);
+ ph[2] = kmalloc (sizeof (*ph[2]), GFP_KERNEL);
+ if (!ph[1] || !ph[2]) {
+ ldm_crit ("Out of memory.");
+ goto out;
+ }
+
+ /* off[1 & 2] are relative to ph[0]->config_start */
+ ph[0]->config_start = 0;
+
+ /* Read and parse privheads */
+ for (i = 0; i < 3; i++) {
+ data = read_dev_sector (bdev,
+ ph[0]->config_start + off[i], &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ goto out;
+ }
+ result = ldm_parse_privhead (data, ph[i]);
+ put_dev_sector (sect);
+ if (!result) {
+ ldm_error ("Cannot find PRIVHEAD %d.", i+1); /* Log again */
+ if (i < 2)
+ goto out; /* Already logged */
+ else
+ break; /* FIXME ignore for now, 3rd PH can fail on odd-sized disks */
+ }
+ }
+
+ num_sects = bdev->bd_inode->i_size >> 9;
+
+ if ((ph[0]->config_start > num_sects) ||
+ ((ph[0]->config_start + ph[0]->config_size) > num_sects)) {
+ ldm_crit ("Database extends beyond the end of the disk.");
+ goto out;
+ }
+
+ if ((ph[0]->logical_disk_start > ph[0]->config_start) ||
+ ((ph[0]->logical_disk_start + ph[0]->logical_disk_size)
+ > ph[0]->config_start)) {
+ ldm_crit ("Disk and database overlap.");
+ goto out;
+ }
+
+ if (!ldm_compare_privheads (ph[0], ph[1])) {
+ ldm_crit ("Primary and backup PRIVHEADs don't match.");
+ goto out;
+ }
+ /* FIXME ignore this for now
+ if (!ldm_compare_privheads (ph[0], ph[2])) {
+ ldm_crit ("Primary and backup PRIVHEADs don't match.");
+ goto out;
+ }*/
+ ldm_debug ("Validated PRIVHEADs successfully.");
+ result = true;
+out:
+ kfree (ph[1]);
+ kfree (ph[2]);
+ return result;
+}
+
+/**
+ * ldm_validate_tocblocks - Validate the table of contents and its backups
+ * @bdev: Device holding the LDM Database
+ * @base: Offset, into @bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * Find and compare the four tables of contents of the LDM Database stored on
+ * @bdev and return the parsed information into @toc1.
+ *
+ * The offsets and sizes of the configs are range-checked against a privhead.
+ *
+ * Return: 'true' @toc1 contains validated TOCBLOCK info
+ * 'false' @toc1 contents are undefined
+ */
+static bool ldm_validate_tocblocks(struct block_device *bdev,
+ unsigned long base, struct ldmdb *ldb)
+{
+ static const int off[4] = { OFF_TOCB1, OFF_TOCB2, OFF_TOCB3, OFF_TOCB4};
+ struct tocblock *tb[4];
+ struct privhead *ph;
+ Sector sect;
+ u8 *data;
+ int i, nr_tbs;
+ bool result = false;
+
+ BUG_ON(!bdev || !ldb);
+ ph = &ldb->ph;
+ tb[0] = &ldb->toc;
+ tb[1] = kmalloc(sizeof(*tb[1]) * 3, GFP_KERNEL);
+ if (!tb[1]) {
+ ldm_crit("Out of memory.");
+ goto err;
+ }
+ tb[2] = (struct tocblock*)((u8*)tb[1] + sizeof(*tb[1]));
+ tb[3] = (struct tocblock*)((u8*)tb[2] + sizeof(*tb[2]));
+ /*
+ * Try to read and parse all four TOCBLOCKs.
+ *
+ * Windows Vista LDM v2.12 does not always have all four TOCBLOCKs so
+ * skip any that fail as long as we get at least one valid TOCBLOCK.
+ */
+ for (nr_tbs = i = 0; i < 4; i++) {
+ data = read_dev_sector(bdev, base + off[i], &sect);
+ if (!data) {
+ ldm_error("Disk read failed for TOCBLOCK %d.", i);
+ continue;
+ }
+ if (ldm_parse_tocblock(data, tb[nr_tbs]))
+ nr_tbs++;
+ put_dev_sector(sect);
+ }
+ if (!nr_tbs) {
+ ldm_crit("Failed to find a valid TOCBLOCK.");
+ goto err;
+ }
+ /* Range check the TOCBLOCK against a privhead. */
+ if (((tb[0]->bitmap1_start + tb[0]->bitmap1_size) > ph->config_size) ||
+ ((tb[0]->bitmap2_start + tb[0]->bitmap2_size) >
+ ph->config_size)) {
+ ldm_crit("The bitmaps are out of range. Giving up.");
+ goto err;
+ }
+ /* Compare all loaded TOCBLOCKs. */
+ for (i = 1; i < nr_tbs; i++) {
+ if (!ldm_compare_tocblocks(tb[0], tb[i])) {
+ ldm_crit("TOCBLOCKs 0 and %d do not match.", i);
+ goto err;
+ }
+ }
+ ldm_debug("Validated %d TOCBLOCKs successfully.", nr_tbs);
+ result = true;
+err:
+ kfree(tb[1]);
+ return result;
+}
+
+/**
+ * ldm_validate_vmdb - Read the VMDB and validate it
+ * @bdev: Device holding the LDM Database
+ * @base: Offset, into @bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * Find the vmdb of the LDM Database stored on @bdev and return the parsed
+ * information in @ldb.
+ *
+ * Return:  'true'   @ldb contains validated VMDB info
+ * 'false' @ldb contents are undefined
+ */
+static bool ldm_validate_vmdb (struct block_device *bdev, unsigned long base,
+ struct ldmdb *ldb)
+{
+ Sector sect;
+ u8 *data;
+ bool result = false;
+ struct vmdb *vm;
+ struct tocblock *toc;
+
+ BUG_ON (!bdev || !ldb);
+
+ vm = &ldb->vm;
+ toc = &ldb->toc;
+
+ data = read_dev_sector (bdev, base + OFF_VMDB, &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ return false;
+ }
+
+ if (!ldm_parse_vmdb (data, vm))
+ goto out; /* Already logged */
+
+ /* Are there uncommitted transactions? */
+ if (get_unaligned_be16(data + 0x10) != 0x01) {
+ ldm_crit ("Database is not in a consistent state. Aborting.");
+ goto out;
+ }
+
+ if (vm->vblk_offset != 512)
+ ldm_info ("VBLKs start at offset 0x%04x.", vm->vblk_offset);
+
+ /*
+	 * The last_vblk_seq can be before the end of the vmdb, just make sure
+ * it is not out of bounds.
+ */
+ if ((vm->vblk_size * vm->last_vblk_seq) > (toc->bitmap1_size << 9)) {
+ ldm_crit ("VMDB exceeds allowed size specified by TOCBLOCK. "
+ "Database is corrupt. Aborting.");
+ goto out;
+ }
+
+ result = true;
+out:
+ put_dev_sector (sect);
+ return result;
+}
+
+
+/**
+ * ldm_validate_partition_table - Determine whether bdev might be a dynamic disk
+ * @bdev: Device holding the LDM Database
+ *
+ * This function provides a weak test to decide whether the device is a dynamic
+ * disk or not. It looks for an MS-DOS-style partition table containing at
+ * least one partition of type 0x42 (formerly SFS, now used by Windows for
+ * dynamic disks).
+ *
+ * N.B.  The only possible error can come from read_dev_sector() and that is
+ * only likely to happen if the underlying device is strange. If that IS
+ * the case we should return zero to let someone else try.
+ *
+ * Return: 'true' @bdev is a dynamic disk
+ * 'false' @bdev is not a dynamic disk, or an error occurred
+ */
+static bool ldm_validate_partition_table (struct block_device *bdev)
+{
+ Sector sect;
+ u8 *data;
+ struct partition *p;
+ int i;
+ bool result = false;
+
+ BUG_ON (!bdev);
+
+ data = read_dev_sector (bdev, 0, &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ return false;
+ }
+
+ if (*(__le16*) (data + 0x01FE) != cpu_to_le16 (MSDOS_LABEL_MAGIC))
+ goto out;
+
+ p = (struct partition*)(data + 0x01BE);
+ for (i = 0; i < 4; i++, p++)
+ if (SYS_IND (p) == LDM_PARTITION) {
+ result = true;
+ break;
+ }
+
+ if (result)
+ ldm_debug ("Found W2K dynamic disk partition type.");
+
+out:
+ put_dev_sector (sect);
+ return result;
+}
+
+/**
+ * ldm_get_disk_objid - Search a linked list of vblk's for a given Disk Id
+ * @ldb: Cache of the database structures
+ *
+ * The LDM Database contains a list of all partitions on all dynamic disks.
+ * The primary PRIVHEAD, at the beginning of the physical disk, tells us
+ * the GUID of this disk. This function searches for the GUID in a linked
+ * list of vblk's.
+ *
+ * Return: Pointer, A matching vblk was found
+ * NULL, No match, or an error
+ */
+static struct vblk * ldm_get_disk_objid (const struct ldmdb *ldb)
+{
+ struct list_head *item;
+
+ BUG_ON (!ldb);
+
+ list_for_each (item, &ldb->v_disk) {
+ struct vblk *v = list_entry (item, struct vblk, list);
+ if (!memcmp (v->vblk.disk.disk_id, ldb->ph.disk_id, GUID_SIZE))
+ return v;
+ }
+
+ return NULL;
+}
+
+/**
+ * ldm_create_data_partitions - Create data partitions for this device
+ * @pp: List of the partitions parsed so far
+ * @ldb: Cache of the database structures
+ *
+ * The database contains ALL the partitions for ALL disk groups, so we need to
+ * filter out this specific disk. Using the disk's object id, we can find all
+ * the partitions in the database that belong to this disk.
+ *
+ * Add each partition in our database, to the parsed_partitions structure.
+ *
+ * N.B. This function creates the partitions in the order it finds partition
+ * objects in the linked list.
+ *
+ * Return:  'true'   Partitions created
+ * 'false' Error, probably a range checking problem
+ */
+static bool ldm_create_data_partitions (struct parsed_partitions *pp,
+ const struct ldmdb *ldb)
+{
+ struct list_head *item;
+ struct vblk *vb;
+ struct vblk *disk;
+ struct vblk_part *part;
+ int part_num = 1;
+
+ BUG_ON (!pp || !ldb);
+
+ disk = ldm_get_disk_objid (ldb);
+ if (!disk) {
+ ldm_crit ("Can't find the ID of this disk in the database.");
+ return false;
+ }
+
+ printk (" [LDM]");
+
+ /* Create the data partitions */
+ list_for_each (item, &ldb->v_part) {
+ vb = list_entry (item, struct vblk, list);
+ part = &vb->vblk.part;
+
+ if (part->disk_id != disk->obj_id)
+ continue;
+
+ put_partition (pp, part_num, ldb->ph.logical_disk_start +
+ part->start, part->size);
+ part_num++;
+ }
+
+ printk ("\n");
+ return true;
+}
+
+
+/**
+ * ldm_relative - Calculate the next relative offset
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @base: Size of the previous fixed width fields
+ * @offset: Cumulative size of the previous variable-width fields
+ *
+ * Because many of the VBLK fields are variable-width, it's necessary
+ * to calculate each offset based on the previous one and the length
+ * of the field it pointed to.
+ *
+ * Return: -1 Error, the calculated offset exceeded the size of the buffer
+ * n OK, a range-checked offset into buffer
+ */
+static int ldm_relative(const u8 *buffer, int buflen, int base, int offset)
+{
+
+ base += offset;
+ if (!buffer || offset < 0 || base > buflen) {
+ if (!buffer)
+ ldm_error("!buffer");
+ if (offset < 0)
+ ldm_error("offset (%d) < 0", offset);
+ if (base > buflen)
+ ldm_error("base (%d) > buflen (%d)", base, buflen);
+ return -1;
+ }
+ if (base + buffer[base] >= buflen) {
+ ldm_error("base (%d) + buffer[base] (%d) >= buflen (%d)", base,
+ buffer[base], buflen);
+ return -1;
+ }
+ return buffer[base] + offset + 1;
+}
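+
+/*
+ * Illustrative sketch, not part of the original driver: the value returned
+ * by ldm_relative() is the cumulative offset to pass into the next call, so
+ * the variable-width fields of a VBLK are walked by chaining calls.  If the
+ * variable-width area starts at 0x18 and the first field is a 4-byte number
+ * (length byte 0x04), then ldm_relative(buffer, buflen, 0x18, 0) returns 5
+ * and the next field's length byte sits at buffer[0x18 + 5].  The typical
+ * pattern used by the ldm_parse_* helpers below is:
+ *
+ *	r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+ *	if (r_objid < 0)
+ *		return false;
+ *	r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+ */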
+
+/**
+ * ldm_get_vnum - Convert a variable-width, big endian number, into cpu order
+ * @block: Pointer to the variable-width number to convert
+ *
+ * Large numbers in the LDM Database are often stored in a packed format. Each
+ * number is prefixed by a one byte width marker. All numbers in the database
+ * are stored in big-endian byte order. This function reads one of these
+ * numbers and returns the result.
+ *
+ * N.B. This function DOES NOT perform any range checking, though the most
+ * it will read is eight bytes.
+ *
+ * Return: n A number
+ * 0 Zero, or an error occurred
+ */
+static u64 ldm_get_vnum (const u8 *block)
+{
+ u64 tmp = 0;
+ u8 length;
+
+ BUG_ON (!block);
+
+ length = *block++;
+
+ if (length && length <= 8)
+ while (length--)
+ tmp = (tmp << 8) | *block++;
+ else
+ ldm_error ("Illegal length %d.", length);
+
+ return tmp;
+}
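+
+/*
+ * A worked example (illustrative only): a vnum is a length byte followed by
+ * that many big-endian bytes, so the sequence { 0x02, 0x01, 0x2C } decodes
+ * as (0x01 << 8) | 0x2C = 300:
+ *
+ *	static const u8 raw[] = { 0x02, 0x01, 0x2C };
+ *	u64 v = ldm_get_vnum(raw);	// v == 300
+ */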
+
+/**
+ * ldm_get_vstr - Read a length-prefixed string into a buffer
+ * @block: Pointer to the length marker
+ * @buffer: Location to copy string to
+ * @buflen: Size of the output buffer
+ *
+ * Many of the strings in the LDM Database are not NULL terminated. Instead
+ * they are prefixed by a one byte length marker. This function copies one of
+ * these strings into a buffer.
+ *
+ * N.B. This function DOES NOT perform any range checking on the input.
+ * If the buffer is too small, the output will be truncated.
+ *
+ * Return: 0, Error and @buffer contents are undefined
+ * n, String length in characters (excluding NULL)
+ * buflen-1, String was truncated.
+ */
+static int ldm_get_vstr (const u8 *block, u8 *buffer, int buflen)
+{
+ int length;
+
+ BUG_ON (!block || !buffer);
+
+ length = block[0];
+ if (length >= buflen) {
+ ldm_error ("Truncating string %d -> %d.", length, buflen);
+ length = buflen - 1;
+ }
+ memcpy (buffer, block + 1, length);
+ buffer[length] = 0;
+ return length;
+}
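+
+/*
+ * A worked example (illustrative only): the string "data" is stored on disk
+ * as a length byte followed by the characters.  With an 8-byte output buffer
+ * ldm_get_vstr() copies "data" and returns 4; with a 3-byte buffer it
+ * truncates to "da" and returns 2, i.e. buflen - 1:
+ *
+ *	static const u8 raw[] = { 0x04, 'd', 'a', 't', 'a' };
+ *	u8 out[8];
+ *	int n = ldm_get_vstr(raw, out, sizeof(out));	// n == 4, out = "data"
+ */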
+
+
+/**
+ * ldm_parse_cmp3 - Read a raw VBLK Component object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Component object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Component VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_cmp3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_vstate, r_child, r_parent, r_stripe, r_cols, len;
+ struct vblk_comp *comp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_vstate = ldm_relative (buffer, buflen, 0x18, r_name);
+ r_child = ldm_relative (buffer, buflen, 0x1D, r_vstate);
+ r_parent = ldm_relative (buffer, buflen, 0x2D, r_child);
+
+ if (buffer[0x12] & VBLK_FLAG_COMP_STRIPE) {
+ r_stripe = ldm_relative (buffer, buflen, 0x2E, r_parent);
+ r_cols = ldm_relative (buffer, buflen, 0x2E, r_stripe);
+ len = r_cols;
+ } else {
+ r_stripe = 0;
+ r_cols = 0;
+ len = r_parent;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_CMP3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ comp = &vb->vblk.comp;
+ ldm_get_vstr (buffer + 0x18 + r_name, comp->state,
+ sizeof (comp->state));
+ comp->type = buffer[0x18 + r_vstate];
+ comp->children = ldm_get_vnum (buffer + 0x1D + r_vstate);
+ comp->parent_id = ldm_get_vnum (buffer + 0x2D + r_child);
+ comp->chunksize = r_stripe ? ldm_get_vnum (buffer+r_parent+0x2E) : 0;
+
+ return true;
+}
+
+/**
+ * ldm_parse_dgr3 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk Group VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dgr3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_diskid, r_id1, r_id2, len;
+ struct vblk_dgrp *dgrp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
+
+ if (buffer[0x12] & VBLK_FLAG_DGR3_IDS) {
+ r_id1 = ldm_relative (buffer, buflen, 0x24, r_diskid);
+ r_id2 = ldm_relative (buffer, buflen, 0x24, r_id1);
+ len = r_id2;
+ } else {
+ r_id1 = 0;
+ r_id2 = 0;
+ len = r_diskid;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DGR3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ dgrp = &vb->vblk.dgrp;
+ ldm_get_vstr (buffer + 0x18 + r_name, dgrp->disk_id,
+ sizeof (dgrp->disk_id));
+ return true;
+}
+
+/**
+ * ldm_parse_dgr4 - Read a raw VBLK Disk Group object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk Group object (version 4) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk Group VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dgr4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ char buf[64];
+ int r_objid, r_name, r_id1, r_id2, len;
+ struct vblk_dgrp *dgrp;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+
+ if (buffer[0x12] & VBLK_FLAG_DGR4_IDS) {
+ r_id1 = ldm_relative (buffer, buflen, 0x44, r_name);
+ r_id2 = ldm_relative (buffer, buflen, 0x44, r_id1);
+ len = r_id2;
+ } else {
+ r_id1 = 0;
+ r_id2 = 0;
+ len = r_name;
+ }
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DGR4;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ dgrp = &vb->vblk.dgrp;
+
+ ldm_get_vstr (buffer + 0x18 + r_objid, buf, sizeof (buf));
+ return true;
+}
+
+/**
+ * ldm_parse_dsk3 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dsk3 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_diskid, r_altname, len;
+ struct vblk_disk *disk;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ r_diskid = ldm_relative (buffer, buflen, 0x18, r_name);
+ r_altname = ldm_relative (buffer, buflen, 0x18, r_diskid);
+ len = r_altname;
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DSK3;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ disk = &vb->vblk.disk;
+ ldm_get_vstr (buffer + 0x18 + r_diskid, disk->alt_name,
+ sizeof (disk->alt_name));
+ if (!ldm_parse_guid (buffer + 0x19 + r_name, disk->disk_id))
+ return false;
+
+ return true;
+}
+
+/**
+ * ldm_parse_dsk4 - Read a raw VBLK Disk object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Disk object (version 4) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Disk VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, len;
+ struct vblk_disk *disk;
+
+ BUG_ON (!buffer || !vb);
+
+ r_objid = ldm_relative (buffer, buflen, 0x18, 0);
+ r_name = ldm_relative (buffer, buflen, 0x18, r_objid);
+ len = r_name;
+ if (len < 0)
+ return false;
+
+ len += VBLK_SIZE_DSK4;
+ if (len != get_unaligned_be32(buffer + 0x14))
+ return false;
+
+ disk = &vb->vblk.disk;
+ memcpy (disk->disk_id, buffer + 0x18 + r_name, GUID_SIZE);
+ return true;
+}
+
+/**
+ * ldm_parse_prt3 - Read a raw VBLK Partition object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Partition object (version 3) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Partition VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_prt3(const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_size, r_parent, r_diskid, r_index, len;
+ struct vblk_part *part;
+
+ BUG_ON(!buffer || !vb);
+ r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error("r_objid %d < 0", r_objid);
+ return false;
+ }
+ r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+ if (r_name < 0) {
+ ldm_error("r_name %d < 0", r_name);
+ return false;
+ }
+ r_size = ldm_relative(buffer, buflen, 0x34, r_name);
+ if (r_size < 0) {
+ ldm_error("r_size %d < 0", r_size);
+ return false;
+ }
+ r_parent = ldm_relative(buffer, buflen, 0x34, r_size);
+ if (r_parent < 0) {
+ ldm_error("r_parent %d < 0", r_parent);
+ return false;
+ }
+ r_diskid = ldm_relative(buffer, buflen, 0x34, r_parent);
+ if (r_diskid < 0) {
+ ldm_error("r_diskid %d < 0", r_diskid);
+ return false;
+ }
+ if (buffer[0x12] & VBLK_FLAG_PART_INDEX) {
+ r_index = ldm_relative(buffer, buflen, 0x34, r_diskid);
+ if (r_index < 0) {
+ ldm_error("r_index %d < 0", r_index);
+ return false;
+ }
+ len = r_index;
+ } else {
+ r_index = 0;
+ len = r_diskid;
+ }
+ if (len < 0) {
+ ldm_error("len %d < 0", len);
+ return false;
+ }
+ len += VBLK_SIZE_PRT3;
+ if (len > get_unaligned_be32(buffer + 0x14)) {
+ ldm_error("len %d > BE32(buffer + 0x14) %d", len,
+ get_unaligned_be32(buffer + 0x14));
+ return false;
+ }
+ part = &vb->vblk.part;
+ part->start = get_unaligned_be64(buffer + 0x24 + r_name);
+ part->volume_offset = get_unaligned_be64(buffer + 0x2C + r_name);
+ part->size = ldm_get_vnum(buffer + 0x34 + r_name);
+ part->parent_id = ldm_get_vnum(buffer + 0x34 + r_size);
+ part->disk_id = ldm_get_vnum(buffer + 0x34 + r_parent);
+ if (vb->flags & VBLK_FLAG_PART_INDEX)
+ part->partnum = buffer[0x35 + r_diskid];
+ else
+ part->partnum = 0;
+ return true;
+}
+
+/**
+ * ldm_parse_vol5 - Read a raw VBLK Volume object into a vblk structure
+ * @buffer: Block of data being worked on
+ * @buflen: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK Volume object (version 5) into a vblk structure.
+ *
+ * Return: 'true' @vb contains a Volume VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_vol5(const u8 *buffer, int buflen, struct vblk *vb)
+{
+ int r_objid, r_name, r_vtype, r_disable_drive_letter, r_child, r_size;
+ int r_id1, r_id2, r_size2, r_drive, len;
+ struct vblk_volu *volu;
+
+ BUG_ON(!buffer || !vb);
+ r_objid = ldm_relative(buffer, buflen, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error("r_objid %d < 0", r_objid);
+ return false;
+ }
+ r_name = ldm_relative(buffer, buflen, 0x18, r_objid);
+ if (r_name < 0) {
+ ldm_error("r_name %d < 0", r_name);
+ return false;
+ }
+ r_vtype = ldm_relative(buffer, buflen, 0x18, r_name);
+ if (r_vtype < 0) {
+ ldm_error("r_vtype %d < 0", r_vtype);
+ return false;
+ }
+ r_disable_drive_letter = ldm_relative(buffer, buflen, 0x18, r_vtype);
+ if (r_disable_drive_letter < 0) {
+ ldm_error("r_disable_drive_letter %d < 0",
+ r_disable_drive_letter);
+ return false;
+ }
+ r_child = ldm_relative(buffer, buflen, 0x2D, r_disable_drive_letter);
+ if (r_child < 0) {
+ ldm_error("r_child %d < 0", r_child);
+ return false;
+ }
+ r_size = ldm_relative(buffer, buflen, 0x3D, r_child);
+ if (r_size < 0) {
+ ldm_error("r_size %d < 0", r_size);
+ return false;
+ }
+ if (buffer[0x12] & VBLK_FLAG_VOLU_ID1) {
+ r_id1 = ldm_relative(buffer, buflen, 0x52, r_size);
+ if (r_id1 < 0) {
+ ldm_error("r_id1 %d < 0", r_id1);
+ return false;
+ }
+ } else
+ r_id1 = r_size;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_ID2) {
+ r_id2 = ldm_relative(buffer, buflen, 0x52, r_id1);
+ if (r_id2 < 0) {
+ ldm_error("r_id2 %d < 0", r_id2);
+ return false;
+ }
+ } else
+ r_id2 = r_id1;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_SIZE) {
+ r_size2 = ldm_relative(buffer, buflen, 0x52, r_id2);
+ if (r_size2 < 0) {
+ ldm_error("r_size2 %d < 0", r_size2);
+ return false;
+ }
+ } else
+ r_size2 = r_id2;
+ if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+ r_drive = ldm_relative(buffer, buflen, 0x52, r_size2);
+ if (r_drive < 0) {
+ ldm_error("r_drive %d < 0", r_drive);
+ return false;
+ }
+ } else
+ r_drive = r_size2;
+ len = r_drive;
+ if (len < 0) {
+ ldm_error("len %d < 0", len);
+ return false;
+ }
+ len += VBLK_SIZE_VOL5;
+ if (len > get_unaligned_be32(buffer + 0x14)) {
+ ldm_error("len %d > BE32(buffer + 0x14) %d", len,
+ get_unaligned_be32(buffer + 0x14));
+ return false;
+ }
+ volu = &vb->vblk.volu;
+ ldm_get_vstr(buffer + 0x18 + r_name, volu->volume_type,
+ sizeof(volu->volume_type));
+ memcpy(volu->volume_state, buffer + 0x18 + r_disable_drive_letter,
+ sizeof(volu->volume_state));
+ volu->size = ldm_get_vnum(buffer + 0x3D + r_child);
+ volu->partition_type = buffer[0x41 + r_size];
+ memcpy(volu->guid, buffer + 0x42 + r_size, sizeof(volu->guid));
+ if (buffer[0x12] & VBLK_FLAG_VOLU_DRIVE) {
+ ldm_get_vstr(buffer + 0x52 + r_size, volu->drive_hint,
+ sizeof(volu->drive_hint));
+ }
+ return true;
+}
+
+/**
+ * ldm_parse_vblk - Read a raw VBLK object into a vblk structure
+ * @buf: Block of data being worked on
+ * @len: Size of the block of data
+ * @vb: In-memory vblk in which to return information
+ *
+ * Read a raw VBLK object into a vblk structure. This function just reads the
+ * information common to all VBLK types, then delegates the rest of the work to
+ * helper functions: ldm_parse_*.
+ *
+ * Return: 'true' @vb contains a VBLK
+ * 'false' @vb contents are not defined
+ */
+static bool ldm_parse_vblk (const u8 *buf, int len, struct vblk *vb)
+{
+ bool result = false;
+ int r_objid;
+
+ BUG_ON (!buf || !vb);
+
+ r_objid = ldm_relative (buf, len, 0x18, 0);
+ if (r_objid < 0) {
+ ldm_error ("VBLK header is corrupt.");
+ return false;
+ }
+
+ vb->flags = buf[0x12];
+ vb->type = buf[0x13];
+ vb->obj_id = ldm_get_vnum (buf + 0x18);
+ ldm_get_vstr (buf+0x18+r_objid, vb->name, sizeof (vb->name));
+
+ switch (vb->type) {
+ case VBLK_CMP3: result = ldm_parse_cmp3 (buf, len, vb); break;
+ case VBLK_DSK3: result = ldm_parse_dsk3 (buf, len, vb); break;
+ case VBLK_DSK4: result = ldm_parse_dsk4 (buf, len, vb); break;
+ case VBLK_DGR3: result = ldm_parse_dgr3 (buf, len, vb); break;
+ case VBLK_DGR4: result = ldm_parse_dgr4 (buf, len, vb); break;
+ case VBLK_PRT3: result = ldm_parse_prt3 (buf, len, vb); break;
+ case VBLK_VOL5: result = ldm_parse_vol5 (buf, len, vb); break;
+ }
+
+ if (result)
+ ldm_debug ("Parsed VBLK 0x%llx (type: 0x%02x) ok.",
+ (unsigned long long) vb->obj_id, vb->type);
+ else
+ ldm_error ("Failed to parse VBLK 0x%llx (type: 0x%02x).",
+ (unsigned long long) vb->obj_id, vb->type);
+
+ return result;
+}
+
+
+/**
+ * ldm_ldmdb_add - Adds a raw VBLK entry to the ldmdb database
+ * @data: Raw VBLK to add to the database
+ * @len: Size of the raw VBLK
+ * @ldb: Cache of the database structures
+ *
+ * The VBLKs are sorted into categories. Partitions are also sorted by offset.
+ *
+ * N.B. This function does not check the validity of the VBLKs.
+ *
+ * Return: 'true' The VBLK was added
+ * 'false' An error occurred
+ */
+static bool ldm_ldmdb_add (u8 *data, int len, struct ldmdb *ldb)
+{
+ struct vblk *vb;
+ struct list_head *item;
+
+ BUG_ON (!data || !ldb);
+
+ vb = kmalloc (sizeof (*vb), GFP_KERNEL);
+ if (!vb) {
+ ldm_crit ("Out of memory.");
+ return false;
+ }
+
+ if (!ldm_parse_vblk (data, len, vb)) {
+ kfree(vb);
+ return false; /* Already logged */
+ }
+
+ /* Put vblk into the correct list. */
+ switch (vb->type) {
+ case VBLK_DGR3:
+ case VBLK_DGR4:
+ list_add (&vb->list, &ldb->v_dgrp);
+ break;
+ case VBLK_DSK3:
+ case VBLK_DSK4:
+ list_add (&vb->list, &ldb->v_disk);
+ break;
+ case VBLK_VOL5:
+ list_add (&vb->list, &ldb->v_volu);
+ break;
+ case VBLK_CMP3:
+ list_add (&vb->list, &ldb->v_comp);
+ break;
+ case VBLK_PRT3:
+ /* Sort by the partition's start sector. */
+ list_for_each (item, &ldb->v_part) {
+ struct vblk *v = list_entry (item, struct vblk, list);
+ if ((v->vblk.part.disk_id == vb->vblk.part.disk_id) &&
+ (v->vblk.part.start > vb->vblk.part.start)) {
+ list_add_tail (&vb->list, &v->list);
+ return true;
+ }
+ }
+ list_add_tail (&vb->list, &ldb->v_part);
+ break;
+ }
+ return true;
+}
+
+/**
+ * ldm_frag_add - Add a VBLK fragment to a list
+ * @data: Raw fragment to be added to the list
+ * @size: Size of the raw fragment
+ * @frags: Linked list of VBLK fragments
+ *
+ * Fragmented VBLKs may not be consecutive in the database, so they are placed
+ * in a list so they can be pieced together later.
+ *
+ * Return: 'true' Success, the VBLK was added to the list
+ * 'false' Error, a problem occurred
+ */
+static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
+{
+ struct frag *f;
+ struct list_head *item;
+ int rec, num, group;
+
+ BUG_ON (!data || !frags);
+
+ group = get_unaligned_be32(data + 0x08);
+ rec = get_unaligned_be16(data + 0x0C);
+ num = get_unaligned_be16(data + 0x0E);
+ if ((num < 1) || (num > 4)) {
+ ldm_error ("A VBLK claims to have %d parts.", num);
+ return false;
+ }
+
+ list_for_each (item, frags) {
+ f = list_entry (item, struct frag, list);
+ if (f->group == group)
+ goto found;
+ }
+
+ f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
+ if (!f) {
+ ldm_crit ("Out of memory.");
+ return false;
+ }
+
+ f->group = group;
+ f->num = num;
+ f->rec = rec;
+ f->map = 0xFF << num;
+
+ list_add_tail (&f->list, frags);
+found:
+ if (f->map & (1 << rec)) {
+ ldm_error ("Duplicate VBLK, part %d.", rec);
+ f->map &= 0x7F; /* Mark the group as broken */
+ return false;
+ }
+
+ f->map |= (1 << rec);
+
+ if (num > 0) {
+ data += VBLK_SIZE_HEAD;
+ size -= VBLK_SIZE_HEAD;
+ }
+ memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
+
+ return true;
+}
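+
+/*
+ * A note on the bitmap above (illustrative, not from the original source):
+ * for a group of num fragments the map starts as 0xFF << num, so only the
+ * bits of the expected record numbers remain clear.  With num == 3:
+ *
+ *	f->map = 0xFF << 3;				// 0xF8
+ *	f->map |= (1 << 0) | (1 << 1) | (1 << 2);	// 0xFF, group complete
+ *
+ * ldm_frag_commit() below treats a map of exactly 0xFF as "all parts seen".
+ */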
+
+/**
+ * ldm_frag_free - Free a linked list of VBLK fragments
+ * @list: Linked list of fragments
+ *
+ * Free a linked list of VBLK fragments
+ *
+ * Return: none
+ */
+static void ldm_frag_free (struct list_head *list)
+{
+ struct list_head *item, *tmp;
+
+ BUG_ON (!list);
+
+ list_for_each_safe (item, tmp, list)
+ kfree (list_entry (item, struct frag, list));
+}
+
+/**
+ * ldm_frag_commit - Validate fragmented VBLKs and add them to the database
+ * @frags: Linked list of VBLK fragments
+ * @ldb: Cache of the database structures
+ *
+ * Now that all the fragmented VBLKs have been collected, they must be added to
+ * the database for later use.
+ *
+ * Return:  'true'   All the fragments were added successfully
+ *          'false'  One or more of the fragments were invalid
+ */
+static bool ldm_frag_commit (struct list_head *frags, struct ldmdb *ldb)
+{
+ struct frag *f;
+ struct list_head *item;
+
+ BUG_ON (!frags || !ldb);
+
+ list_for_each (item, frags) {
+ f = list_entry (item, struct frag, list);
+
+ if (f->map != 0xFF) {
+ ldm_error ("VBLK group %d is incomplete (0x%02x).",
+ f->group, f->map);
+ return false;
+ }
+
+ if (!ldm_ldmdb_add (f->data, f->num*ldb->vm.vblk_size, ldb))
+ return false; /* Already logged */
+ }
+ return true;
+}
+
+/**
+ * ldm_get_vblks - Read the on-disk database of VBLKs into memory
+ * @bdev: Device holding the LDM Database
+ * @base: Offset, into @bdev, of the database
+ * @ldb: Cache of the database structures
+ *
+ * To use the information from the VBLKs, they need to be read from the disk,
+ * unpacked and validated. We cache them in @ldb according to their type.
+ *
+ * Return: 'true' All the VBLKs were read successfully
+ * 'false' An error occurred
+ */
+static bool ldm_get_vblks (struct block_device *bdev, unsigned long base,
+ struct ldmdb *ldb)
+{
+ int size, perbuf, skip, finish, s, v, recs;
+ u8 *data = NULL;
+ Sector sect;
+ bool result = false;
+ LIST_HEAD (frags);
+
+ BUG_ON (!bdev || !ldb);
+
+ size = ldb->vm.vblk_size;
+ perbuf = 512 / size;
+ skip = ldb->vm.vblk_offset >> 9; /* Bytes to sectors */
+ finish = (size * ldb->vm.last_vblk_seq) >> 9;
+
+ for (s = skip; s < finish; s++) { /* For each sector */
+ data = read_dev_sector (bdev, base + OFF_VMDB + s, &sect);
+ if (!data) {
+ ldm_crit ("Disk read failed.");
+ goto out;
+ }
+
+ for (v = 0; v < perbuf; v++, data+=size) { /* For each vblk */
+ if (MAGIC_VBLK != get_unaligned_be32(data)) {
+ ldm_error ("Expected to find a VBLK.");
+ goto out;
+ }
+
+ recs = get_unaligned_be16(data + 0x0E); /* Number of records */
+ if (recs == 1) {
+ if (!ldm_ldmdb_add (data, size, ldb))
+ goto out; /* Already logged */
+ } else if (recs > 1) {
+ if (!ldm_frag_add (data, size, &frags))
+ goto out; /* Already logged */
+ }
+ /* else Record is not in use, ignore it. */
+ }
+ put_dev_sector (sect);
+ data = NULL;
+ }
+
+ result = ldm_frag_commit (&frags, ldb); /* Failures, already logged */
+out:
+ if (data)
+ put_dev_sector (sect);
+ ldm_frag_free (&frags);
+
+ return result;
+}
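+
+/*
+ * A worked example with illustrative figures: with the common vblk_size of
+ * 128 bytes and vblk_offset of 512, the loop above reads 512 / 128 = 4 VBLKs
+ * per sector, skips 512 >> 9 = 1 sector of VMDB header, and stops after
+ * (128 * last_vblk_seq) >> 9 sectors.  A database with last_vblk_seq == 64
+ * is therefore read from sectors 1..15 of the VMDB area.
+ */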
+
+/**
+ * ldm_free_vblks - Free a linked list of vblk's
+ * @lh: Head of a linked list of struct vblk
+ *
+ * Free a list of vblk's and free the memory used to maintain the list.
+ *
+ * Return: none
+ */
+static void ldm_free_vblks (struct list_head *lh)
+{
+ struct list_head *item, *tmp;
+
+ BUG_ON (!lh);
+
+ list_for_each_safe (item, tmp, lh)
+ kfree (list_entry (item, struct vblk, list));
+}
+
+
+/**
+ * ldm_partition - Find out whether a device is a dynamic disk and handle it
+ * @pp: List of the partitions parsed so far
+ * @bdev: Device holding the LDM Database
+ *
+ * This determines whether the device @bdev is a dynamic disk and if so creates
+ * the partitions necessary in the parsed_partitions structure @pp.
+ *
+ * We create a dummy device 1, which contains the LDM database, and then create
+ * each partition described by the LDM database in sequence as devices 2+. For
+ * example, if the device is hda, we would have: hda1: LDM database, hda2, hda3,
+ * and so on: the actual data-containing partitions.
+ *
+ * Return: 1 Success, @bdev is a dynamic disk and we handled it
+ * 0 Success, @bdev is not a dynamic disk
+ * -1 An error occurred before enough information had been read
+ * Or @bdev is a dynamic disk, but it may be corrupted
+ */
+int ldm_partition (struct parsed_partitions *pp, struct block_device *bdev)
+{
+ struct ldmdb *ldb;
+ unsigned long base;
+ int result = -1;
+
+ BUG_ON (!pp || !bdev);
+
+ /* Look for signs of a Dynamic Disk */
+ if (!ldm_validate_partition_table (bdev))
+ return 0;
+
+ ldb = kmalloc (sizeof (*ldb), GFP_KERNEL);
+ if (!ldb) {
+ ldm_crit ("Out of memory.");
+ goto out;
+ }
+
+ /* Parse and check privheads. */
+ if (!ldm_validate_privheads (bdev, &ldb->ph))
+ goto out; /* Already logged */
+
+ /* All further references are relative to base (database start). */
+ base = ldb->ph.config_start;
+
+ /* Parse and check tocs and vmdb. */
+ if (!ldm_validate_tocblocks (bdev, base, ldb) ||
+ !ldm_validate_vmdb (bdev, base, ldb))
+ goto out; /* Already logged */
+
+ /* Initialize vblk lists in ldmdb struct */
+ INIT_LIST_HEAD (&ldb->v_dgrp);
+ INIT_LIST_HEAD (&ldb->v_disk);
+ INIT_LIST_HEAD (&ldb->v_volu);
+ INIT_LIST_HEAD (&ldb->v_comp);
+ INIT_LIST_HEAD (&ldb->v_part);
+
+ if (!ldm_get_vblks (bdev, base, ldb)) {
+ ldm_crit ("Failed to read the VBLKs from the database.");
+ goto cleanup;
+ }
+
+ /* Finally, create the data partition devices. */
+ if (ldm_create_data_partitions (pp, ldb)) {
+ ldm_debug ("Parsed LDM database successfully.");
+ result = 1;
+ }
+ /* else Already logged */
+
+cleanup:
+ ldm_free_vblks (&ldb->v_dgrp);
+ ldm_free_vblks (&ldb->v_disk);
+ ldm_free_vblks (&ldb->v_volu);
+ ldm_free_vblks (&ldb->v_comp);
+ ldm_free_vblks (&ldb->v_part);
+out:
+ kfree (ldb);
+ return result;
+}
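+
+/*
+ * Usage sketch, not part of the original file: a partition-check caller is
+ * expected to distinguish the three return values, roughly as follows (the
+ * two helpers are hypothetical placeholders):
+ *
+ *	int res = ldm_partition(pp, bdev);
+ *	if (res == 1)
+ *		return 1;		// dynamic disk, partitions filled in
+ *	else if (res == 0)
+ *		try_next_parser();	// hypothetical: not a dynamic disk
+ *	else
+ *		give_up();		// hypothetical: read error or corruption
+ */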
diff --git a/windhoek/partitions/ldm.h b/windhoek/partitions/ldm.h
new file mode 100644
index 00000000..30e08e80
--- /dev/null
+++ b/windhoek/partitions/ldm.h
@@ -0,0 +1,215 @@
+/**
+ * ldm - Part of the Linux-NTFS project.
+ *
+ * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
+ * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
+ *
+ * Documentation is available at http://www.linux-ntfs.org/content/view/19/37/
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program (in the main directory of the Linux-NTFS source
+ * in the file COPYING); if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _FS_PT_LDM_H_
+#define _FS_PT_LDM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <asm/unaligned.h>
+#include <asm/byteorder.h>
+
+struct parsed_partitions;
+
+/* Magic numbers in CPU format. */
+#define MAGIC_VMDB 0x564D4442 /* VMDB */
+#define MAGIC_VBLK 0x56424C4B /* VBLK */
+#define MAGIC_PRIVHEAD 0x5052495648454144ULL /* PRIVHEAD */
+#define MAGIC_TOCBLOCK 0x544F43424C4F434BULL /* TOCBLOCK */
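+
+/*
+ * Note (illustrative): these constants are simply the on-disk ASCII
+ * signatures read as big-endian integers, e.g. "VBLK" is 0x56 0x42 0x4C 0x4B,
+ * which is why the parser compares them against get_unaligned_be32():
+ *
+ *	if (MAGIC_VBLK != get_unaligned_be32(data))
+ *		return false;	// not a VBLK
+ */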
+
+/* The defined vblk types. */
+#define VBLK_VOL5 0x51 /* Volume, version 5 */
+#define VBLK_CMP3 0x32 /* Component, version 3 */
+#define VBLK_PRT3 0x33 /* Partition, version 3 */
+#define VBLK_DSK3 0x34 /* Disk, version 3 */
+#define VBLK_DSK4 0x44 /* Disk, version 4 */
+#define VBLK_DGR3 0x35 /* Disk Group, version 3 */
+#define VBLK_DGR4 0x45 /* Disk Group, version 4 */
+
+/* vblk flags indicating extra information will be present */
+#define VBLK_FLAG_COMP_STRIPE 0x10
+#define VBLK_FLAG_PART_INDEX 0x08
+#define VBLK_FLAG_DGR3_IDS 0x08
+#define VBLK_FLAG_DGR4_IDS 0x08
+#define VBLK_FLAG_VOLU_ID1 0x08
+#define VBLK_FLAG_VOLU_ID2 0x20
+#define VBLK_FLAG_VOLU_SIZE 0x80
+#define VBLK_FLAG_VOLU_DRIVE 0x02
+
+/* size of a vblk's static parts */
+#define VBLK_SIZE_HEAD 16
+#define VBLK_SIZE_CMP3 22 /* Name and version */
+#define VBLK_SIZE_DGR3 12
+#define VBLK_SIZE_DGR4 44
+#define VBLK_SIZE_DSK3 12
+#define VBLK_SIZE_DSK4 45
+#define VBLK_SIZE_PRT3 28
+#define VBLK_SIZE_VOL5 58
+
+/* component types */
+#define COMP_STRIPE 0x01 /* Stripe-set */
+#define COMP_BASIC 0x02 /* Basic disk */
+#define COMP_RAID 0x03 /* Raid-set */
+
+/* Other constants. */
+#define LDM_DB_SIZE 2048 /* Size in sectors (= 1MiB). */
+
+#define OFF_PRIV1 6 /* Offset of the first privhead
+ relative to the start of the
+ device in sectors */
+
+/* Offsets to structures within the LDM Database in sectors. */
+#define OFF_PRIV2 1856 /* Backup private headers. */
+#define OFF_PRIV3 2047
+
+#define OFF_TOCB1 1 /* Tables of contents. */
+#define OFF_TOCB2 2
+#define OFF_TOCB3 2045
+#define OFF_TOCB4 2046
+
+#define OFF_VMDB 17 /* List of partitions. */
+
+#define LDM_PARTITION 0x42 /* Formerly SFS (Landis). */
+
+#define TOC_BITMAP1 "config" /* Names of the two defined */
+#define TOC_BITMAP2 "log" /* bitmaps in the TOCBLOCK. */
+
+/* Borrowed from msdos.c */
+#define SYS_IND(p) (get_unaligned(&(p)->sys_ind))
+
+struct frag { /* VBLK Fragment handling */
+ struct list_head list;
+ u32 group;
+ u8 num; /* Total number of records */
+ u8 rec; /* This is record number n */
+ u8 map; /* Which portions are in use */
+ u8 data[0];
+};
+
+/* In memory LDM database structures. */
+
+#define GUID_SIZE 16
+
+struct privhead { /* Offsets and sizes are in sectors. */
+ u16 ver_major;
+ u16 ver_minor;
+ u64 logical_disk_start;
+ u64 logical_disk_size;
+ u64 config_start;
+ u64 config_size;
+ u8 disk_id[GUID_SIZE];
+};
+
+struct tocblock { /* We have exactly two bitmaps. */
+ u8 bitmap1_name[16];
+ u64 bitmap1_start;
+ u64 bitmap1_size;
+ u8 bitmap2_name[16];
+ u64 bitmap2_start;
+ u64 bitmap2_size;
+};
+
+struct vmdb { /* VMDB: The database header */
+ u16 ver_major;
+ u16 ver_minor;
+ u32 vblk_size;
+ u32 vblk_offset;
+ u32 last_vblk_seq;
+};
+
+struct vblk_comp { /* VBLK Component */
+ u8 state[16];
+ u64 parent_id;
+ u8 type;
+ u8 children;
+ u16 chunksize;
+};
+
+struct vblk_dgrp { /* VBLK Disk Group */
+ u8 disk_id[64];
+};
+
+struct vblk_disk { /* VBLK Disk */
+ u8 disk_id[GUID_SIZE];
+ u8 alt_name[128];
+};
+
+struct vblk_part { /* VBLK Partition */
+ u64 start;
+ u64 size; /* start, size and vol_off in sectors */
+ u64 volume_offset;
+ u64 parent_id;
+ u64 disk_id;
+ u8 partnum;
+};
+
+struct vblk_volu { /* VBLK Volume */
+ u8 volume_type[16];
+ u8 volume_state[16];
+ u8 guid[16];
+ u8 drive_hint[4];
+ u64 size;
+ u8 partition_type;
+};
+
+struct vblk_head { /* VBLK standard header */
+ u32 group;
+ u16 rec;
+ u16 nrec;
+};
+
+struct vblk { /* Generalised VBLK */
+ u8 name[64];
+ u64 obj_id;
+ u32 sequence;
+ u8 flags;
+ u8 type;
+ union {
+ struct vblk_comp comp;
+ struct vblk_dgrp dgrp;
+ struct vblk_disk disk;
+ struct vblk_part part;
+ struct vblk_volu volu;
+ } vblk;
+ struct list_head list;
+};
+
+struct ldmdb { /* Cache of the database */
+ struct privhead ph;
+ struct tocblock toc;
+ struct vmdb vm;
+ struct list_head v_dgrp;
+ struct list_head v_disk;
+ struct list_head v_volu;
+ struct list_head v_comp;
+ struct list_head v_part;
+};
+
+int ldm_partition (struct parsed_partitions *state, struct block_device *bdev);
+
+#endif /* _FS_PT_LDM_H_ */
+
diff --git a/windhoek/partitions/mac.c b/windhoek/partitions/mac.c
new file mode 100644
index 00000000..d4a0fad3
--- /dev/null
+++ b/windhoek/partitions/mac.c
@@ -0,0 +1,132 @@
+/*
+ * fs/partitions/mac.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include <linux/ctype.h>
+#include "check.h"
+#include "mac.h"
+
+#ifdef CONFIG_PPC_PMAC
+#include <asm/machdep.h>
+extern void note_bootable_part(dev_t dev, int part, int goodness);
+#endif
+
+/*
+ * Code to understand MacOS partition tables.
+ */
+
+static inline void mac_fix_string(char *stg, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0 && stg[i] == ' '; i--)
+ stg[i] = 0;
+}
+
+int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ int blk, blocks_in_map;
+ unsigned secsize;
+#ifdef CONFIG_PPC_PMAC
+ int found_root = 0;
+ int found_root_goodness = 0;
+#endif
+ struct mac_partition *part;
+ struct mac_driver_desc *md;
+
+ /* Get 0th block and look at the first partition map entry. */
+ md = (struct mac_driver_desc *) read_dev_sector(bdev, 0, &sect);
+ if (!md)
+ return -1;
+ if (be16_to_cpu(md->signature) != MAC_DRIVER_MAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ secsize = be16_to_cpu(md->block_size);
+ put_dev_sector(sect);
+ data = read_dev_sector(bdev, secsize/512, &sect);
+ if (!data)
+ return -1;
+ part = (struct mac_partition *) (data + secsize%512);
+ if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) {
+ put_dev_sector(sect);
+ return 0; /* not a MacOS disk */
+ }
+ printk(" [mac]");
+ blocks_in_map = be32_to_cpu(part->map_count);
+ for (blk = 1; blk <= blocks_in_map; ++blk) {
+ int pos = blk * secsize;
+ put_dev_sector(sect);
+ data = read_dev_sector(bdev, pos/512, &sect);
+ if (!data)
+ return -1;
+ part = (struct mac_partition *) (data + pos%512);
+ if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC)
+ break;
+ put_partition(state, slot,
+ be32_to_cpu(part->start_block) * (secsize/512),
+ be32_to_cpu(part->block_count) * (secsize/512));
+
+ if (!strnicmp(part->type, "Linux_RAID", 10))
+ state->parts[slot].flags = 1;
+#ifdef CONFIG_PPC_PMAC
+ /*
+ * If this is the first bootable partition, tell the
+ * setup code, in case it wants to make this the root.
+ */
+ if (machine_is(powermac)) {
+ int goodness = 0;
+
+ mac_fix_string(part->processor, 16);
+ mac_fix_string(part->name, 32);
+ mac_fix_string(part->type, 32);
+
+ if ((be32_to_cpu(part->status) & MAC_STATUS_BOOTABLE)
+ && strcasecmp(part->processor, "powerpc") == 0)
+ goodness++;
+
+ if (strcasecmp(part->type, "Apple_UNIX_SVR2") == 0
+ || (strnicmp(part->type, "Linux", 5) == 0
+ && strcasecmp(part->type, "Linux_swap") != 0)) {
+ int i, l;
+
+ goodness++;
+ l = strlen(part->name);
+ if (strcmp(part->name, "/") == 0)
+ goodness++;
+ for (i = 0; i <= l - 4; ++i) {
+ if (strnicmp(part->name + i, "root",
+ 4) == 0) {
+ goodness += 2;
+ break;
+ }
+ }
+ if (strnicmp(part->name, "swap", 4) == 0)
+ goodness--;
+ }
+
+ if (goodness > found_root_goodness) {
+ found_root = blk;
+ found_root_goodness = goodness;
+ }
+ }
+#endif /* CONFIG_PPC_PMAC */
+
+ ++slot;
+ }
+#ifdef CONFIG_PPC_PMAC
+ if (found_root_goodness)
+ note_bootable_part(bdev->bd_dev, found_root, found_root_goodness);
+#endif
+
+ put_dev_sector(sect);
+ printk("\n");
+ return 1;
+}
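+
+/*
+ * A note on the block-size scaling above (illustrative example): the driver
+ * descriptor gives the map's block size, which need not be 512.  With
+ * block_size == 2048, map entry blk lives at byte offset blk * 2048, i.e.
+ * 512-byte sector (blk * 2048) / 512 with an in-sector offset of
+ * (blk * 2048) % 512, and a partition whose start_block is 10 begins at
+ * 512-byte sector 10 * (2048 / 512) = 40.
+ */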
diff --git a/windhoek/partitions/mac.h b/windhoek/partitions/mac.h
new file mode 100644
index 00000000..bbf26e13
--- /dev/null
+++ b/windhoek/partitions/mac.h
@@ -0,0 +1,44 @@
+/*
+ * fs/partitions/mac.h
+ */
+
+#define MAC_PARTITION_MAGIC 0x504d
+
+/* type field value for A/UX or other Unix partitions */
+#define APPLE_AUX_TYPE "Apple_UNIX_SVR2"
+
+struct mac_partition {
+ __be16 signature; /* expected to be MAC_PARTITION_MAGIC */
+ __be16 res1;
+ __be32 map_count; /* # blocks in partition map */
+ __be32 start_block; /* absolute starting block # of partition */
+ __be32 block_count; /* number of blocks in partition */
+ char name[32]; /* partition name */
+ char type[32]; /* string type description */
+ __be32 data_start; /* rel block # of first data block */
+ __be32 data_count; /* number of data blocks */
+ __be32 status; /* partition status bits */
+ __be32 boot_start;
+ __be32 boot_size;
+ __be32 boot_load;
+ __be32 boot_load2;
+ __be32 boot_entry;
+ __be32 boot_entry2;
+ __be32 boot_cksum;
+ char processor[16]; /* identifies ISA of boot */
+ /* there is more stuff after this that we don't need */
+};
+
+#define MAC_STATUS_BOOTABLE 8 /* partition is bootable */
+
+#define MAC_DRIVER_MAGIC 0x4552
+
+/* Driver descriptor structure, in block 0 */
+struct mac_driver_desc {
+ __be16 signature; /* expected to be MAC_DRIVER_MAGIC */
+ __be16 block_size;
+ __be32 block_count;
+ /* ... more stuff */
+};
+
+int mac_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/msdos.c b/windhoek/partitions/msdos.c
new file mode 100644
index 00000000..79651188
--- /dev/null
+++ b/windhoek/partitions/msdos.c
@@ -0,0 +1,529 @@
+/*
+ * fs/partitions/msdos.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ * Copyright (C) 1991-1998 Linus Torvalds
+ *
+ * Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * in the early extended-partition checks and added DM partitions
+ *
+ * Support for DiskManager v6.0x added by Mark Lord,
+ * with information provided by OnTrack. This now works for linux fdisk
+ * and LILO, as well as loadlin and bootln. Note that disks other than
+ * /dev/hda *must* have a "DOS" type 0x51 partition in the first slot (hda1).
+ *
+ * More flexible handling of extended partitions - aeb, 950831
+ *
+ * Check partition table on IDE disks for common CHS translations
+ *
+ * Re-organised Feb 1998 Russell King
+ */
+#include <linux/msdos_fs.h>
+
+#include "check.h"
+#include "msdos.h"
+#include "efi.h"
+
+/*
+ * Many architectures don't like unaligned accesses, while
+ * the nr_sects and start_sect partition table entries are
+ * at a 2 (mod 4) address.
+ */
+#include <asm/unaligned.h>
+
+#define SYS_IND(p) (get_unaligned(&p->sys_ind))
+#define NR_SECTS(p) ({ __le32 __a = get_unaligned(&p->nr_sects); \
+ le32_to_cpu(__a); \
+ })
+
+#define START_SECT(p) ({ __le32 __a = get_unaligned(&p->start_sect); \
+ le32_to_cpu(__a); \
+ })
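+
+/*
+ * Note (illustrative): the MBR partition table starts at offset 0x1be, so
+ * the 32-bit start_sect/nr_sects fields of each entry are only 2-byte
+ * aligned; the macros above go through get_unaligned() so that the reads
+ * are safe on strict-alignment architectures.  Typical use later in this
+ * file:
+ *
+ *	struct partition *p = (struct partition *) (data + 0x1be);
+ *	u32 start = START_SECT(p) * sector_size;
+ *	u32 size  = NR_SECTS(p) * sector_size;
+ */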
+
+static inline int is_extended_partition(struct partition *p)
+{
+ return (SYS_IND(p) == DOS_EXTENDED_PARTITION ||
+ SYS_IND(p) == WIN98_EXTENDED_PARTITION ||
+ SYS_IND(p) == LINUX_EXTENDED_PARTITION);
+}
+
+#define MSDOS_LABEL_MAGIC1 0x55
+#define MSDOS_LABEL_MAGIC2 0xAA
+
+static inline int
+msdos_magic_present(unsigned char *p)
+{
+ return (p[0] == MSDOS_LABEL_MAGIC1 && p[1] == MSDOS_LABEL_MAGIC2);
+}
+
+/* Value is EBCDIC 'IBMA' */
+#define AIX_LABEL_MAGIC1 0xC9
+#define AIX_LABEL_MAGIC2 0xC2
+#define AIX_LABEL_MAGIC3 0xD4
+#define AIX_LABEL_MAGIC4 0xC1
+static int aix_magic_present(unsigned char *p, struct block_device *bdev)
+{
+ struct partition *pt = (struct partition *) (p + 0x1be);
+ Sector sect;
+ unsigned char *d;
+ int slot, ret = 0;
+
+ if (!(p[0] == AIX_LABEL_MAGIC1 &&
+ p[1] == AIX_LABEL_MAGIC2 &&
+ p[2] == AIX_LABEL_MAGIC3 &&
+ p[3] == AIX_LABEL_MAGIC4))
+ return 0;
+	/* Assume the partition table is valid if Linux partitions exist */
+ for (slot = 1; slot <= 4; slot++, pt++) {
+ if (pt->sys_ind == LINUX_SWAP_PARTITION ||
+ pt->sys_ind == LINUX_RAID_PARTITION ||
+ pt->sys_ind == LINUX_DATA_PARTITION ||
+ pt->sys_ind == LINUX_LVM_PARTITION ||
+ is_extended_partition(pt))
+ return 0;
+ }
+ d = read_dev_sector(bdev, 7, &sect);
+ if (d) {
+ if (d[0] == '_' && d[1] == 'L' && d[2] == 'V' && d[3] == 'M')
+ ret = 1;
+ put_dev_sector(sect);
+	}
+ return ret;
+}
+
+/*
+ * Create devices for each logical partition in an extended partition.
+ * The logical partitions form a linked list, with each entry being
+ * a partition table with two entries. The first entry
+ * is the real data partition (with a start relative to the partition
+ * table start). The second is a pointer to the next logical partition
+ * (with a start relative to the entire extended partition).
+ * We do not create a Linux partition for the partition tables, but
+ * only for the actual data partitions.
+ */
+
+static void
+parse_extended(struct parsed_partitions *state, struct block_device *bdev,
+ u32 first_sector, u32 first_size)
+{
+ struct partition *p;
+ Sector sect;
+ unsigned char *data;
+ u32 this_sector, this_size;
+ int sector_size = bdev_hardsect_size(bdev) / 512;
+ int loopct = 0; /* number of links followed
+ without finding a data partition */
+ int i;
+
+ this_sector = first_sector;
+ this_size = first_size;
+
+ while (1) {
+ if (++loopct > 100)
+ return;
+ if (state->next == state->limit)
+ return;
+ data = read_dev_sector(bdev, this_sector, &sect);
+ if (!data)
+ return;
+
+ if (!msdos_magic_present(data + 510))
+ goto done;
+
+ p = (struct partition *) (data + 0x1be);
+
+ /*
+ * Usually, the first entry is the real data partition,
+ * the 2nd entry is the next extended partition, or empty,
+ * and the 3rd and 4th entries are unused.
+ * However, DRDOS sometimes has the extended partition as
+ * the first entry (when the data partition is empty),
+ * and OS/2 seems to use all four entries.
+ */
+
+ /*
+ * First process the data partition(s)
+ */
+ for (i=0; i<4; i++, p++) {
+ u32 offs, size, next;
+ if (!NR_SECTS(p) || is_extended_partition(p))
+ continue;
+
+ /* Check the 3rd and 4th entries -
+ these sometimes contain random garbage */
+ offs = START_SECT(p)*sector_size;
+ size = NR_SECTS(p)*sector_size;
+ next = this_sector + offs;
+ if (i >= 2) {
+ if (offs + size > this_size)
+ continue;
+ if (next < first_sector)
+ continue;
+ if (next + size > first_sector + first_size)
+ continue;
+ }
+
+ put_partition(state, state->next, next, size);
+ if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ state->parts[state->next].flags = ADDPART_FLAG_RAID;
+ loopct = 0;
+ if (++state->next == state->limit)
+ goto done;
+ }
+ /*
+ * Next, process the (first) extended partition, if present.
+ * (So far, there seems to be no reason to make
+ * parse_extended() recursive and allow a tree
+ * of extended partitions.)
+ * It should be a link to the next logical partition.
+ */
+ p -= 4;
+ for (i=0; i<4; i++, p++)
+ if (NR_SECTS(p) && is_extended_partition(p))
+ break;
+ if (i == 4)
+ goto done; /* nothing left to do */
+
+ this_sector = first_sector + START_SECT(p) * sector_size;
+ this_size = NR_SECTS(p) * sector_size;
+ put_dev_sector(sect);
+ }
+done:
+ put_dev_sector(sect);
+}
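+
+/*
+ * An illustration of the chain walked above (the numbers are made up): an
+ * extended partition starting at sector 1000 might contain an EBR whose
+ * first entry describes a logical partition at 1000 + 63 (relative to that
+ * EBR) and whose second entry links to the next EBR at 1000 + 2048
+ * (relative to first_sector, the start of the whole extended partition),
+ * and so on until no further link is found or the loop/limit guards above
+ * stop the walk.
+ */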
+
+/* james@bpgc.com: Solaris has a nasty indicator: 0x82 which also
+ indicates linux swap. Be careful before believing this is Solaris. */
+
+static void
+parse_solaris_x86(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_SOLARIS_X86_PARTITION
+ Sector sect;
+ struct solaris_x86_vtoc *v;
+ int i;
+ short max_nparts;
+
+ v = (struct solaris_x86_vtoc *)read_dev_sector(bdev, offset+1, &sect);
+ if (!v)
+ return;
+ if (le32_to_cpu(v->v_sanity) != SOLARIS_X86_VTOC_SANE) {
+ put_dev_sector(sect);
+ return;
+ }
+ printk(" %s%d: <solaris:", state->name, origin);
+ if (le32_to_cpu(v->v_version) != 1) {
+ printk(" cannot handle version %d vtoc>\n",
+ le32_to_cpu(v->v_version));
+ put_dev_sector(sect);
+ return;
+ }
+ /* Ensure we can handle previous case of VTOC with 8 entries gracefully */
+ max_nparts = le16_to_cpu (v->v_nparts) > 8 ? SOLARIS_X86_NUMSLICE : 8;
+ for (i=0; i<max_nparts && state->next<state->limit; i++) {
+ struct solaris_x86_slice *s = &v->v_slice[i];
+ if (s->s_size == 0)
+ continue;
+ printk(" [s%d]", i);
+ /* solaris partitions are relative to current MS-DOS
+ * one; must add the offset of the current partition */
+ put_partition(state, state->next++,
+ le32_to_cpu(s->s_start)+offset,
+ le32_to_cpu(s->s_size));
+ }
+ put_dev_sector(sect);
+ printk(" >\n");
+#endif
+}
+
+#if defined(CONFIG_BSD_DISKLABEL)
+/*
+ * Create devices for BSD partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+static void
+parse_bsd(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin, char *flavour,
+ int max_partitions)
+{
+ Sector sect;
+ struct bsd_disklabel *l;
+ struct bsd_partition *p;
+
+ l = (struct bsd_disklabel *)read_dev_sector(bdev, offset+1, &sect);
+ if (!l)
+ return;
+ if (le32_to_cpu(l->d_magic) != BSD_DISKMAGIC) {
+ put_dev_sector(sect);
+ return;
+ }
+ printk(" %s%d: <%s:", state->name, origin, flavour);
+
+ if (le16_to_cpu(l->d_npartitions) < max_partitions)
+ max_partitions = le16_to_cpu(l->d_npartitions);
+ for (p = l->d_partitions; p - l->d_partitions < max_partitions; p++) {
+ u32 bsd_start, bsd_size;
+
+ if (state->next == state->limit)
+ break;
+ if (p->p_fstype == BSD_FS_UNUSED)
+ continue;
+ bsd_start = le32_to_cpu(p->p_offset);
+ bsd_size = le32_to_cpu(p->p_size);
+ if (offset == bsd_start && size == bsd_size)
+ /* full parent partition, we have it already */
+ continue;
+ if (offset > bsd_start || offset+size < bsd_start+bsd_size) {
+ printk("bad subpartition - ignored\n");
+ continue;
+ }
+ put_partition(state, state->next++, bsd_start, bsd_size);
+ }
+ put_dev_sector(sect);
+ if (le16_to_cpu(l->d_npartitions) > max_partitions)
+ printk(" (ignored %d more)",
+ le16_to_cpu(l->d_npartitions) - max_partitions);
+ printk(" >\n");
+}
+#endif
+
+static void
+parse_freebsd(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, bdev, offset, size, origin,
+ "bsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void
+parse_netbsd(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, bdev, offset, size, origin,
+ "netbsd", BSD_MAXPARTITIONS);
+#endif
+}
+
+static void
+parse_openbsd(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_BSD_DISKLABEL
+ parse_bsd(state, bdev, offset, size, origin,
+ "openbsd", OPENBSD_MAXPARTITIONS);
+#endif
+}
+
+/*
+ * Create devices for Unixware partitions listed in a disklabel, under a
+ * dos-like partition. See parse_extended() for more information.
+ */
+static void
+parse_unixware(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_UNIXWARE_DISKLABEL
+ Sector sect;
+ struct unixware_disklabel *l;
+ struct unixware_slice *p;
+
+ l = (struct unixware_disklabel *)read_dev_sector(bdev, offset+29, &sect);
+ if (!l)
+ return;
+ if (le32_to_cpu(l->d_magic) != UNIXWARE_DISKMAGIC ||
+ le32_to_cpu(l->vtoc.v_magic) != UNIXWARE_DISKMAGIC2) {
+ put_dev_sector(sect);
+ return;
+ }
+ printk(" %s%d: <unixware:", state->name, origin);
+ p = &l->vtoc.v_slice[1];
+	/* I omit the 0th slice as it is the same as the whole disk. */
+ while (p - &l->vtoc.v_slice[0] < UNIXWARE_NUMSLICE) {
+ if (state->next == state->limit)
+ break;
+
+ if (p->s_label != UNIXWARE_FS_UNUSED)
+ put_partition(state, state->next++,
+ START_SECT(p), NR_SECTS(p));
+ p++;
+ }
+ put_dev_sector(sect);
+ printk(" >\n");
+#endif
+}
+
+/*
+ * Minix 2.0.0/2.0.2 subpartition support.
+ * Anand Krishnamurthy <anandk@wiproge.med.ge.com>
+ * Rajeev V. Pillai <rajeevvp@yahoo.com>
+ */
+static void
+parse_minix(struct parsed_partitions *state, struct block_device *bdev,
+ u32 offset, u32 size, int origin)
+{
+#ifdef CONFIG_MINIX_SUBPARTITION
+ Sector sect;
+ unsigned char *data;
+ struct partition *p;
+ int i;
+
+ data = read_dev_sector(bdev, offset, &sect);
+ if (!data)
+ return;
+
+ p = (struct partition *)(data + 0x1be);
+
+ /* The first sector of a Minix partition can have either
+ * a secondary MBR describing its subpartitions, or
+ * the normal boot sector. */
+ if (msdos_magic_present (data + 510) &&
+ SYS_IND(p) == MINIX_PARTITION) { /* subpartition table present */
+
+ printk(" %s%d: <minix:", state->name, origin);
+ for (i = 0; i < MINIX_NR_SUBPARTITIONS; i++, p++) {
+ if (state->next == state->limit)
+ break;
+ /* add each partition in use */
+ if (SYS_IND(p) == MINIX_PARTITION)
+ put_partition(state, state->next++,
+ START_SECT(p), NR_SECTS(p));
+ }
+ printk(" >\n");
+ }
+ put_dev_sector(sect);
+#endif /* CONFIG_MINIX_SUBPARTITION */
+}
+
+static struct {
+ unsigned char id;
+ void (*parse)(struct parsed_partitions *, struct block_device *,
+ u32, u32, int);
+} subtypes[] = {
+ {FREEBSD_PARTITION, parse_freebsd},
+ {NETBSD_PARTITION, parse_netbsd},
+ {OPENBSD_PARTITION, parse_openbsd},
+ {MINIX_PARTITION, parse_minix},
+ {UNIXWARE_PARTITION, parse_unixware},
+ {SOLARIS_X86_PARTITION, parse_solaris_x86},
+ {NEW_SOLARIS_X86_PARTITION, parse_solaris_x86},
+ {0, NULL},
+};
+
+int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int sector_size = bdev_hardsect_size(bdev) / 512;
+ Sector sect;
+ unsigned char *data;
+ struct partition *p;
+ struct fat_boot_sector *fb;
+ int slot;
+
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+ if (!msdos_magic_present(data + 510)) {
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ if (aix_magic_present(data, bdev)) {
+ put_dev_sector(sect);
+ printk( " [AIX]");
+ return 0;
+ }
+
+ /*
+ * Now that the 55aa signature is present, this is probably
+ * either the boot sector of a FAT filesystem or a DOS-type
+ * partition table. Reject this in case the boot indicator
+ * is not 0 or 0x80.
+ */
+ p = (struct partition *) (data + 0x1be);
+ for (slot = 1; slot <= 4; slot++, p++) {
+ if (p->boot_ind != 0 && p->boot_ind != 0x80) {
+ /*
+			 * Even without a valid boot indicator value
+			 * it's still possible this is a valid FAT filesystem
+			 * without a partition table.
+ */
+ fb = (struct fat_boot_sector *) data;
+ if (slot == 1 && fb->reserved && fb->fats
+ && fat_valid_media(fb->media)) {
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+ } else {
+ put_dev_sector(sect);
+ return 0;
+ }
+ }
+ }
+
+#ifdef CONFIG_EFI_PARTITION
+ p = (struct partition *) (data + 0x1be);
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ /* If this is an EFI GPT disk, msdos should ignore it. */
+ if (SYS_IND(p) == EFI_PMBR_OSTYPE_EFI_GPT) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ }
+#endif
+ p = (struct partition *) (data + 0x1be);
+
+ /*
+ * Look for partitions in two passes:
+ * First find the primary and DOS-type extended partitions.
+ * On the second pass look inside *BSD, Unixware and Solaris partitions.
+ */
+
+ state->next = 5;
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ u32 start = START_SECT(p)*sector_size;
+ u32 size = NR_SECTS(p)*sector_size;
+ if (!size)
+ continue;
+ if (is_extended_partition(p)) {
+ /* prevent someone doing mkfs or mkswap on an
+ extended partition, but leave room for LILO */
+ put_partition(state, slot, start, size == 1 ? 1 : 2);
+ printk(" <");
+ parse_extended(state, bdev, start, size);
+ printk(" >");
+ continue;
+ }
+ put_partition(state, slot, start, size);
+ if (SYS_IND(p) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags = 1;
+ if (SYS_IND(p) == DM6_PARTITION)
+ printk("[DM]");
+ if (SYS_IND(p) == EZD_PARTITION)
+ printk("[EZD]");
+ }
+
+ printk("\n");
+
+ /* second pass - output for each on a separate line */
+ p = (struct partition *) (0x1be + data);
+ for (slot = 1 ; slot <= 4 ; slot++, p++) {
+ unsigned char id = SYS_IND(p);
+ int n;
+
+ if (!NR_SECTS(p))
+ continue;
+
+ for (n = 0; subtypes[n].parse && id != subtypes[n].id; n++)
+ ;
+
+ if (!subtypes[n].parse)
+ continue;
+ subtypes[n].parse(state, bdev, START_SECT(p)*sector_size,
+ NR_SECTS(p)*sector_size, slot);
+ }
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/windhoek/partitions/msdos.h b/windhoek/partitions/msdos.h
new file mode 100644
index 00000000..01e5e0b6
--- /dev/null
+++ b/windhoek/partitions/msdos.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/msdos.h
+ */
+
+#define MSDOS_LABEL_MAGIC 0xAA55
+
+int msdos_partition(struct parsed_partitions *state, struct block_device *bdev);
+
diff --git a/windhoek/partitions/osf.c b/windhoek/partitions/osf.c
new file mode 100644
index 00000000..c05c17bc
--- /dev/null
+++ b/windhoek/partitions/osf.c
@@ -0,0 +1,78 @@
+/*
+ * fs/partitions/osf.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "osf.h"
+
+int osf_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i;
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ struct disklabel {
+ __le32 d_magic;
+ __le16 d_type,d_subtype;
+ u8 d_typename[16];
+ u8 d_packname[16];
+ __le32 d_secsize;
+ __le32 d_nsectors;
+ __le32 d_ntracks;
+ __le32 d_ncylinders;
+ __le32 d_secpercyl;
+ __le32 d_secprtunit;
+ __le16 d_sparespertrack;
+ __le16 d_sparespercyl;
+ __le32 d_acylinders;
+ __le16 d_rpm, d_interleave, d_trackskew, d_cylskew;
+ __le32 d_headswitch, d_trkseek, d_flags;
+ __le32 d_drivedata[5];
+ __le32 d_spare[5];
+ __le32 d_magic2;
+ __le16 d_checksum;
+ __le16 d_npartitions;
+ __le32 d_bbsize, d_sbsize;
+ struct d_partition {
+ __le32 p_size;
+ __le32 p_offset;
+ __le32 p_fsize;
+ u8 p_fstype;
+ u8 p_frag;
+ __le16 p_cpg;
+ } d_partitions[8];
+ } * label;
+ struct d_partition * partition;
+
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct disklabel *) (data+64);
+ partition = label->d_partitions;
+ if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) {
+ if (slot == state->limit)
+ break;
+ if (le32_to_cpu(partition->p_size))
+ put_partition(state, slot,
+ le32_to_cpu(partition->p_offset),
+ le32_to_cpu(partition->p_size));
+ slot++;
+ }
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+}
diff --git a/windhoek/partitions/osf.h b/windhoek/partitions/osf.h
new file mode 100644
index 00000000..427b8eab
--- /dev/null
+++ b/windhoek/partitions/osf.h
@@ -0,0 +1,7 @@
+/*
+ * fs/partitions/osf.h
+ */
+
+#define DISKLABELMAGIC (0x82564557UL)
+
+int osf_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/sgi.c b/windhoek/partitions/sgi.c
new file mode 100644
index 00000000..ed5ac83f
--- /dev/null
+++ b/windhoek/partitions/sgi.c
@@ -0,0 +1,82 @@
+/*
+ * fs/partitions/sgi.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ */
+
+#include "check.h"
+#include "sgi.h"
+
+struct sgi_disklabel {
+ __be32 magic_mushroom; /* Big fat spliff... */
+ __be16 root_part_num; /* Root partition number */
+ __be16 swap_part_num; /* Swap partition number */
+ s8 boot_file[16]; /* Name of boot file for ARCS */
+ u8 _unused0[48]; /* Device parameter useless crapola.. */
+ struct sgi_volume {
+ s8 name[8]; /* Name of volume */
+ __be32 block_num; /* Logical block number */
+ __be32 num_bytes; /* How big, in bytes */
+ } volume[15];
+ struct sgi_partition {
+ __be32 num_blocks; /* Size in logical blocks */
+ __be32 first_block; /* First logical block */
+ __be32 type; /* Type of this partition */
+ } partitions[16];
+ __be32 csum; /* Disk label checksum */
+ __be32 _unused1; /* Padding */
+};
+
+int sgi_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i, csum;
+ __be32 magic;
+ int slot = 1;
+ unsigned int start, blocks;
+ __be32 *ui, cs;
+ Sector sect;
+ struct sgi_disklabel *label;
+ struct sgi_partition *p;
+ char b[BDEVNAME_SIZE];
+
+ label = (struct sgi_disklabel *) read_dev_sector(bdev, 0, &sect);
+ if (!label)
+ return -1;
+ p = &label->partitions[0];
+ magic = label->magic_mushroom;
+ if(be32_to_cpu(magic) != SGI_LABEL_MAGIC) {
+ /*printk("Dev %s SGI disklabel: bad magic %08x\n",
+ bdevname(bdev, b), be32_to_cpu(magic));*/
+ put_dev_sector(sect);
+ return 0;
+ }
+ ui = ((__be32 *) (label + 1)) - 1;
+ for(csum = 0; ui >= ((__be32 *) label);) {
+ cs = *ui--;
+ csum += be32_to_cpu(cs);
+ }
+ if(csum) {
+ printk(KERN_WARNING "Dev %s SGI disklabel: csum bad, label corrupted\n",
+ bdevname(bdev, b));
+ put_dev_sector(sect);
+ return 0;
+ }
+ /* All SGI disk labels have 16 partitions, but disks under Linux only
+ * have 15 minors. Luckily there are always a few zero-length
+ * partitions which we don't care about so we never overflow the
+ * current_minor.
+ */
+ for(i = 0; i < 16; i++, p++) {
+ blocks = be32_to_cpu(p->num_blocks);
+ start = be32_to_cpu(p->first_block);
+ if (blocks) {
+ put_partition(state, slot, start, blocks);
+ if (be32_to_cpu(p->type) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags = ADDPART_FLAG_RAID;
+ }
+ slot++;
+ }
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+}
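
The checksum loop above adds every big-endian 32-bit word of the 512-byte label and rejects the label unless the total wraps to zero; in other words, the csum field is expected to hold the two's-complement negation of the sum of all other words. A hedged sketch of how a label writer would fill that field (not part of this driver, shown only to make the invariant explicit):

    /* Illustrative only: value to store in csum so the reader's sum is 0. */
    static __be32 sgi_label_csum(struct sgi_disklabel *label)
    {
            __be32 *w = (__be32 *)label;
            u32 sum = 0;
            unsigned int i;

            label->csum = 0;
            for (i = 0; i < sizeof(*label) / sizeof(__be32); i++)
                    sum += be32_to_cpu(w[i]);
            return cpu_to_be32(-sum);
    }
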
diff --git a/windhoek/partitions/sgi.h b/windhoek/partitions/sgi.h
new file mode 100644
index 00000000..5d5595c0
--- /dev/null
+++ b/windhoek/partitions/sgi.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/sgi.h
+ */
+
+extern int sgi_partition(struct parsed_partitions *state, struct block_device *bdev);
+
+#define SGI_LABEL_MAGIC 0x0be5a941
+
diff --git a/windhoek/partitions/sun.c b/windhoek/partitions/sun.c
new file mode 100644
index 00000000..c95e6a62
--- /dev/null
+++ b/windhoek/partitions/sun.c
@@ -0,0 +1,122 @@
+/*
+ * fs/partitions/sun.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Copyright (C) 1991-1998 Linus Torvalds
+ * Re-organised Feb 1998 Russell King
+ */
+
+#include "check.h"
+#include "sun.h"
+
+int sun_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i;
+ __be16 csum;
+ int slot = 1;
+ __be16 *ush;
+ Sector sect;
+ struct sun_disklabel {
+ unsigned char info[128]; /* Informative text string */
+ struct sun_vtoc {
+ __be32 version; /* Layout version */
+ char volume[8]; /* Volume name */
+ __be16 nparts; /* Number of partitions */
+ struct sun_info { /* Partition hdrs, sec 2 */
+ __be16 id;
+ __be16 flags;
+ } infos[8];
+ __be16 padding; /* Alignment padding */
+ __be32 bootinfo[3]; /* Info needed by mboot */
+ __be32 sanity; /* To verify vtoc sanity */
+ __be32 reserved[10]; /* Free space */
+ __be32 timestamp[8]; /* Partition timestamp */
+ } vtoc;
+ __be32 write_reinstruct; /* sectors to skip, writes */
+ __be32 read_reinstruct; /* sectors to skip, reads */
+ unsigned char spare[148]; /* Padding */
+ __be16 rspeed; /* Disk rotational speed */
+ __be16 pcylcount; /* Physical cylinder count */
+ __be16 sparecyl; /* extra sects per cylinder */
+ __be16 obs1; /* gap1 */
+ __be16 obs2; /* gap2 */
+ __be16 ilfact; /* Interleave factor */
+ __be16 ncyl; /* Data cylinder count */
+ __be16 nacyl; /* Alt. cylinder count */
+ __be16 ntrks; /* Tracks per cylinder */
+ __be16 nsect; /* Sectors per track */
+ __be16 obs3; /* bhead - Label head offset */
+ __be16 obs4; /* ppart - Physical Partition */
+ struct sun_partition {
+ __be32 start_cylinder;
+ __be32 num_sectors;
+ } partitions[8];
+ __be16 magic; /* Magic number */
+ __be16 csum; /* Label xor'd checksum */
+ } * label;
+ struct sun_partition *p;
+ unsigned long spc;
+ char b[BDEVNAME_SIZE];
+ int use_vtoc;
+ int nparts;
+
+ label = (struct sun_disklabel *)read_dev_sector(bdev, 0, &sect);
+ if (!label)
+ return -1;
+
+ p = label->partitions;
+ if (be16_to_cpu(label->magic) != SUN_LABEL_MAGIC) {
+/* printk(KERN_INFO "Dev %s Sun disklabel: bad magic %04x\n",
+ bdevname(bdev, b), be16_to_cpu(label->magic)); */
+ put_dev_sector(sect);
+ return 0;
+ }
+ /* Look at the checksum */
+ ush = ((__be16 *) (label+1)) - 1;
+ for (csum = 0; ush >= ((__be16 *) label);)
+ csum ^= *ush--;
+ if (csum) {
+ printk("Dev %s Sun disklabel: Csum bad, label corrupted\n",
+ bdevname(bdev, b));
+ put_dev_sector(sect);
+ return 0;
+ }
+
+ /* Check to see if we can use the VTOC table */
+ use_vtoc = ((be32_to_cpu(label->vtoc.sanity) == SUN_VTOC_SANITY) &&
+ (be32_to_cpu(label->vtoc.version) == 1) &&
+ (be16_to_cpu(label->vtoc.nparts) <= 8));
+
+ /* Use 8 partition entries if not specified in validated VTOC */
+ nparts = (use_vtoc) ? be16_to_cpu(label->vtoc.nparts) : 8;
+
+ /*
+ * So that old Linux-Sun partitions continue to work,
+ * allow the VTOC to be used under the additional condition ...
+ */
+ use_vtoc = use_vtoc || !(label->vtoc.sanity ||
+ label->vtoc.version || label->vtoc.nparts);
+ spc = be16_to_cpu(label->ntrks) * be16_to_cpu(label->nsect);
+ for (i = 0; i < nparts; i++, p++) {
+ unsigned long st_sector;
+ unsigned int num_sectors;
+
+ st_sector = be32_to_cpu(p->start_cylinder) * spc;
+ num_sectors = be32_to_cpu(p->num_sectors);
+ if (num_sectors) {
+ put_partition(state, slot, st_sector, num_sectors);
+ state->parts[slot].flags = 0;
+ if (use_vtoc) {
+ if (be16_to_cpu(label->vtoc.infos[i].id) == LINUX_RAID_PARTITION)
+ state->parts[slot].flags |= ADDPART_FLAG_RAID;
+ else if (be16_to_cpu(label->vtoc.infos[i].id) == SUN_WHOLE_DISK)
+ state->parts[slot].flags |= ADDPART_FLAG_WHOLEDISK;
+ }
+ }
+ slot++;
+ }
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+}
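
Sun labels address partitions by cylinder, so the loop above converts start_cylinder into an absolute 512-byte sector offset using sectors-per-cylinder = ntrks * nsect. A small illustrative helper (the numbers in the trailing comment are made up):

    /* Illustrative: cylinder number -> absolute sector, given the label geometry. */
    static unsigned long sun_cyl_to_sector(unsigned int cyl,
                                           unsigned int ntrks, unsigned int nsect)
    {
            return (unsigned long)cyl * ntrks * nsect;
    }
    /* e.g. ntrks = 16, nsect = 63 gives 1008 sectors per cylinder,
     * so a partition starting at cylinder 100 begins at sector 100800. */
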
diff --git a/windhoek/partitions/sun.h b/windhoek/partitions/sun.h
new file mode 100644
index 00000000..7f864d1f
--- /dev/null
+++ b/windhoek/partitions/sun.h
@@ -0,0 +1,8 @@
+/*
+ * fs/partitions/sun.h
+ */
+
+#define SUN_LABEL_MAGIC 0xDABE
+#define SUN_VTOC_SANITY 0x600DDEEE
+
+int sun_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/sysv68.c b/windhoek/partitions/sysv68.c
new file mode 100644
index 00000000..4eba27b7
--- /dev/null
+++ b/windhoek/partitions/sysv68.c
@@ -0,0 +1,92 @@
+/*
+ * fs/partitions/sysv68.c
+ *
+ * Copyright (C) 2007 Philippe De Muyter <phdm@macqel.be>
+ */
+
+#include "check.h"
+#include "sysv68.h"
+
+/*
+ * Volume ID structure: on first 256-bytes sector of disk
+ */
+
+struct volumeid {
+ u8 vid_unused[248];
+ u8 vid_mac[8]; /* ASCII string "MOTOROLA" */
+};
+
+/*
+ * config block: second 256-bytes sector on disk
+ */
+
+struct dkconfig {
+ u8 ios_unused0[128];
+ __be32 ios_slcblk; /* Slice table block number */
+ __be16 ios_slccnt; /* Number of entries in slice table */
+ u8 ios_unused1[122];
+};
+
+/*
+ * combined volumeid and dkconfig block
+ */
+
+struct dkblk0 {
+ struct volumeid dk_vid;
+ struct dkconfig dk_ios;
+};
+
+/*
+ * Slice Table Structure
+ */
+
+struct slice {
+ __be32 nblocks; /* slice size (in blocks) */
+ __be32 blkoff; /* block offset of slice */
+};
+
+
+int sysv68_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i, slices;
+ int slot = 1;
+ Sector sect;
+ unsigned char *data;
+ struct dkblk0 *b;
+ struct slice *slice;
+
+ data = read_dev_sector(bdev, 0, &sect);
+ if (!data)
+ return -1;
+
+ b = (struct dkblk0 *)data;
+ if (memcmp(b->dk_vid.vid_mac, "MOTOROLA", sizeof(b->dk_vid.vid_mac))) {
+ put_dev_sector(sect);
+ return 0;
+ }
+ slices = be16_to_cpu(b->dk_ios.ios_slccnt);
+ i = be32_to_cpu(b->dk_ios.ios_slcblk);
+ put_dev_sector(sect);
+
+ data = read_dev_sector(bdev, i, &sect);
+ if (!data)
+ return -1;
+
+ slices -= 1; /* last slice is the whole disk */
+ printk("sysV68: %s(s%u)", state->name, slices);
+ slice = (struct slice *)data;
+ for (i = 0; i < slices; i++, slice++) {
+ if (slot == state->limit)
+ break;
+ if (be32_to_cpu(slice->nblocks)) {
+ put_partition(state, slot,
+ be32_to_cpu(slice->blkoff),
+ be32_to_cpu(slice->nblocks));
+ printk("(s%u)", i);
+ }
+ slot++;
+ }
+ printk("\n");
+ put_dev_sector(sect);
+ return 1;
+}
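
As declared above, struct volumeid is 248 + 8 = 256 bytes and struct dkconfig is 128 + 4 + 2 + 122 = 256 bytes, so struct dkblk0 fills exactly one 512-byte sector and a single read_dev_sector(bdev, 0, ...) yields both the MOTOROLA signature and the slice-table location. The slice table is then read from block ios_slcblk; of its ios_slccnt entries only ios_slccnt - 1 are scanned, because the last slice always describes the whole disk.
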
diff --git a/windhoek/partitions/sysv68.h b/windhoek/partitions/sysv68.h
new file mode 100644
index 00000000..fa733f68
--- /dev/null
+++ b/windhoek/partitions/sysv68.h
@@ -0,0 +1 @@
+extern int sysv68_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/partitions/ultrix.c b/windhoek/partitions/ultrix.c
new file mode 100644
index 00000000..ec852c11
--- /dev/null
+++ b/windhoek/partitions/ultrix.c
@@ -0,0 +1,48 @@
+/*
+ * fs/partitions/ultrix.c
+ *
+ * Code extracted from drivers/block/genhd.c
+ *
+ * Re-organised Jul 1999 Russell King
+ */
+
+#include "check.h"
+#include "ultrix.h"
+
+int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev)
+{
+ int i;
+ Sector sect;
+ unsigned char *data;
+ struct ultrix_disklabel {
+ s32 pt_magic; /* magic no. indicating part. info exists */
+ s32 pt_valid; /* set by driver if pt is current */
+ struct pt_info {
+ s32 pi_nblocks; /* no. of sectors */
+ u32 pi_blkoff; /* block offset for start */
+ } pt_part[8];
+ } *label;
+
+#define PT_MAGIC 0x032957 /* Partition magic number */
+#define PT_VALID 1 /* Indicates if struct is valid */
+
+ data = read_dev_sector(bdev, (16384 - sizeof(*label))/512, &sect);
+ if (!data)
+ return -1;
+
+ label = (struct ultrix_disklabel *)(data + 512 - sizeof(*label));
+
+ if (label->pt_magic == PT_MAGIC && label->pt_valid == PT_VALID) {
+ for (i=0; i<8; i++)
+ if (label->pt_part[i].pi_nblocks)
+ put_partition(state, i+1,
+ label->pt_part[i].pi_blkoff,
+ label->pt_part[i].pi_nblocks);
+ put_dev_sector(sect);
+ printk ("\n");
+ return 1;
+ } else {
+ put_dev_sector(sect);
+ return 0;
+ }
+}
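
The odd-looking read offset above follows from the label living in the last sizeof(*label) bytes of the first 16 KiB of the disk: with the structure as declared, sizeof(*label) is 4 + 4 + 8 * 8 = 72 bytes, so the parser reads sector (16384 - 72) / 512 = 31 and finds the label at byte 512 - 72 = 440 of that sector, i.e. bytes 16312..16383 of the device.
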
diff --git a/windhoek/partitions/ultrix.h b/windhoek/partitions/ultrix.h
new file mode 100644
index 00000000..a74bf8e2
--- /dev/null
+++ b/windhoek/partitions/ultrix.h
@@ -0,0 +1,5 @@
+/*
+ * fs/partitions/ultrix.h
+ */
+
+int ultrix_partition(struct parsed_partitions *state, struct block_device *bdev);
diff --git a/windhoek/windhoek_local.h b/windhoek/windhoek_local.h
new file mode 100644
index 00000000..d0d960b5
--- /dev/null
+++ b/windhoek/windhoek_local.h
@@ -0,0 +1,112 @@
+/**
+ * \file windhoek/windhoek_local.h
+ * \brief Windhoek internal server header
+ *
+ * \date 2008-01-29
+ * \author Bjoern Doebel <doebel@tudos.org>
+ *
+ * (c) 2008 Technische Universitaet Dresden
+ * This file is part of DROPS, which is distributed under the terms of the
+ * GNU General Public License 2. Please see the COPYING file for details.
+ */
+
+#ifndef WINDHOEK_LOCAL
+#define WINDHOEK_LOCAL
+
+#include <l4/windhoek/types.h>
+#include <l4/util/macros.h>
+
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+
+#define MAX_GENDISKS 10
+#define MAX_CLIENTS 16
+
+#undef DEBUG_MSG
+#undef WARN_UNIMPL
+
+#define DEBUG_MSG(msg, ...) printk("%s: \033[34m"msg"\033[0m\n", __FUNCTION__, ##__VA_ARGS__)
+#define WARN_UNIMPL DEBUG_MSG("unimplemented")
+
+extern struct gendisk* known_disks[MAX_GENDISKS];
+void gendisk_init(void);
+struct gendisk* find_disk_by_name(const char *name);
+
+/* Main thread, so that IRQ threads can send notifications */
+extern l4_threadid_t main_thread;
+
+struct windhoek_data_ds
+{
+ struct list_head next;
+ l4dm_dataspace_t ds;
+ l4_size_t size;
+};
+
+
+struct windhoek_bio_entry
+{
+ struct list_head next;
+ const windhoek_bio *wbio;
+};
+
+
+struct client_state
+{
+ l4dm_dataspace_t ctrl_ds;
+ l4_size_t ctrl_size;
+ void *ctrl_address; /**< address where ctrl ds is mapped */
+ l4dm_dataspace_t data_ds;
+ l4_size_t data_size;
+ void *data_address; /**< address where data ds is mapped */
+ struct gendisk *gendisk; /**< the corresponding disk object */
+ l4_threadid_t client_id;
+ l4_threadid_t notify_waiter; /**< thread waiting for notification */
+ struct list_head finished_bios; /**< list of bios that have been serviced;
+                                      kept here until the client retrieves
+                                      the corresponding notifications */
+ spinlock_t lock; /**< lock XXX needed? */
+};
+
+
+extern struct client_state clients[MAX_CLIENTS];
+
+void client_state_init(void);
+
+
+/** Check whether there is a valid client with this handle */
+L4_INLINE int client_state_valid(int idx)
+{
+ return clients[idx].data_address != NULL;
+}
+
+
+/** Check whether the handle itself is valid */
+L4_INLINE int handle_valid(int handle)
+{
+ return ((handle >= 0) && (handle < MAX_CLIENTS));
+}
+
+
+/** sanity check handle and caller */
+L4_INLINE int sanity_check(int handle, l4_threadid_t t)
+{
+ if (!handle_valid(handle) ||
+ !client_state_valid(handle)) {
+ LOG_Error("invalid handle %d", handle);
+ return -EACCES;
+ }
+
+ if (!l4_task_equal(clients[handle].client_id, t)) {
+ LOG_Error("Close by "l4util_idfmt", but owner is "l4util_idfmt,
+ l4util_idstr(t), l4util_idstr(clients[handle].client_id));
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+#define SANITY_CHECK(varname, handle, thread) \
+ int varname = sanity_check(handle, thread); \
+ if (varname) return varname;
+
+#endif
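
The SANITY_CHECK macro above both declares the status variable and returns early on failure, which keeps per-request handlers short. A hypothetical handler using it might look like this (the function name and the clean-up step are illustrative, not part of this header):

    /* Hypothetical request handler -- names below are illustrative only. */
    int windhoek_handle_close(int handle, l4_threadid_t caller)
    {
            SANITY_CHECK(err, handle, caller);   /* returns -EACCES on a bad handle or owner */

            /* handle is known to be valid and owned by the caller from here on */
            clients[handle].data_address = NULL; /* assumed clean-up: mark the slot unused */
            return 0;
    }
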