Diffstat (limited to 'libdde-linux26/contrib/block')
-rw-r--r--  libdde-linux26/contrib/block/as-iosched.c  1526
-rw-r--r--  libdde-linux26/contrib/block/blk-barrier.c  419
-rw-r--r--  libdde-linux26/contrib/block/blk-exec.c  106
-rw-r--r--  libdde-linux26/contrib/block/blk-ioc.c  182
-rw-r--r--  libdde-linux26/contrib/block/blk-merge.c  428
-rw-r--r--  libdde-linux26/contrib/block/blk-settings.c  474
-rw-r--r--  libdde-linux26/contrib/block/blk-softirq.c  175
-rw-r--r--  libdde-linux26/contrib/block/blk-sysfs.c  426
-rw-r--r--  libdde-linux26/contrib/block/blk-tag.c  402
-rw-r--r--  libdde-linux26/contrib/block/blk-timeout.c  232
-rw-r--r--  libdde-linux26/contrib/block/blk.h  119
-rw-r--r--  libdde-linux26/contrib/block/cfq-iosched.c  2465
-rw-r--r--  libdde-linux26/contrib/block/deadline-iosched.c  478
-rw-r--r--  libdde-linux26/contrib/block/elevator.c  1231
-rw-r--r--  libdde-linux26/contrib/block/ioctl.c  372
-rw-r--r--  libdde-linux26/contrib/block/noop-iosched.c  123
-rw-r--r--  libdde-linux26/contrib/block/scsi_ioctl.c  652
17 files changed, 9810 insertions, 0 deletions
diff --git a/libdde-linux26/contrib/block/as-iosched.c b/libdde-linux26/contrib/block/as-iosched.c
new file mode 100644
index 00000000..5b363ced
--- /dev/null
+++ b/libdde-linux26/contrib/block/as-iosched.c
@@ -0,0 +1,1526 @@
+/*
+ * Anticipatory & deadline i/o scheduler.
+ *
+ * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
+ * Nick Piggin <nickpiggin@yahoo.com.au>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+
+#include <ddekit/timer.h>
+
+#define REQ_SYNC 1
+#define REQ_ASYNC 0
+
+/*
+ * See Documentation/block/as-iosched.txt
+ */
+
+/*
+ * max time before a read is submitted.
+ */
+#define default_read_expire (HZ / 8)
+
+/*
+ * ditto for writes, these limits are not hard, even
+ * if the disk is capable of satisfying them.
+ */
+#define default_write_expire (HZ / 4)
+
+/*
+ * read_batch_expire describes how long we will allow a stream of reads to
+ * persist before looking to see whether it is time to switch over to writes.
+ */
+#define default_read_batch_expire (HZ / 2)
+
+/*
+ * write_batch_expire describes how long we want a stream of writes to run for.
+ * This is not a hard limit, but a target we set for the auto-tuning thingy.
+ * See, the problem is: we can send a lot of writes to disk cache / TCQ in
+ * a short amount of time...
+ */
+#define default_write_batch_expire (HZ / 8)
+
+/*
+ * max time we may wait to anticipate a read (default around 6ms)
+ */
+#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
+
+/*
+ * Keep track of up to 20ms thinktimes. We can go as big as we like here,
+ * however huge values tend to interfere and not decay fast enough. A program
+ * might be in a non-io phase of operation. Waiting on user input for example,
+ * or doing a lengthy computation. A small penalty can be justified there, and
+ * will still catch out those processes that constantly have large thinktimes.
+ */
+#define MAX_THINKTIME (HZ/50UL)
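+
+/*
+ * For orientation only, the defaults above work out roughly as follows
+ * (the exact values depend on HZ):
+ *
+ *	HZ=1000: read_expire 125ms, write_expire 250ms,
+ *		 read_batch_expire 500ms, write_batch_expire 125ms,
+ *		 antic_expire 1000/150 = 6 jiffies, about 6ms,
+ *		 MAX_THINKTIME 20 jiffies = 20ms.
+ *	HZ=100:	 100/150 truncates to 0, so antic_expire falls back to
+ *		 the 1 jiffy minimum, i.e. 10ms.
+ */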
+
+/* Bits in as_io_context.state */
+enum as_io_states {
+ AS_TASK_RUNNING=0, /* Process has not exited */
+ AS_TASK_IOSTARTED, /* Process has started some IO */
+ AS_TASK_IORUNNING, /* Process has completed some IO */
+};
+
+enum anticipation_status {
+ ANTIC_OFF=0, /* Not anticipating (normal operation) */
+ ANTIC_WAIT_REQ, /* The last read has not yet completed */
+ ANTIC_WAIT_NEXT, /* Currently anticipating a request vs
+ last read (which has completed) */
+ ANTIC_FINISHED, /* Anticipating but have found a candidate
+ * or timed out */
+};
+
+struct as_data {
+ /*
+ * run time data
+ */
+
+ struct request_queue *q; /* the "owner" queue */
+
+ /*
+ * requests (as_rq s) are present on both sort_list and fifo_list
+ */
+ struct rb_root sort_list[2];
+ struct list_head fifo_list[2];
+
+ struct request *next_rq[2]; /* next in sort order */
+ sector_t last_sector[2]; /* last REQ_SYNC & REQ_ASYNC sectors */
+
+ unsigned long exit_prob; /* probability a task will exit while
+ being waited on */
+ unsigned long exit_no_coop; /* probability an exited task will
+ not be part of a later cooperating
+ request */
+ unsigned long new_ttime_total; /* mean thinktime on new proc */
+ unsigned long new_ttime_mean;
+ u64 new_seek_total; /* mean seek on new proc */
+ sector_t new_seek_mean;
+
+ unsigned long current_batch_expires;
+ unsigned long last_check_fifo[2];
+ int changed_batch; /* 1: waiting for old batch to end */
+ int new_batch; /* 1: waiting on first read complete */
+ int batch_data_dir; /* current batch REQ_SYNC / REQ_ASYNC */
+ int write_batch_count; /* max # of reqs in a write batch */
+ int current_write_count; /* how many requests left this batch */
+ int write_batch_idled; /* has the write batch gone idle? */
+
+ enum anticipation_status antic_status;
+ unsigned long antic_start; /* jiffies: when it started */
+ struct timer_list antic_timer; /* anticipatory scheduling timer */
+ struct work_struct antic_work; /* Deferred unplugging */
+ struct io_context *io_context; /* Identify the expected process */
+ int ioc_finished; /* IO associated with io_context is finished */
+ int nr_dispatched;
+
+ /*
+ * settings that change how the i/o scheduler behaves
+ */
+ unsigned long fifo_expire[2];
+ unsigned long batch_expire[2];
+ unsigned long antic_expire;
+};
+
+/*
+ * per-request data.
+ */
+enum arq_state {
+ AS_RQ_NEW=0, /* New - not referenced and not on any lists */
+ AS_RQ_QUEUED, /* In the request queue. It belongs to the
+ scheduler */
+ AS_RQ_DISPATCHED, /* On the dispatch list. It belongs to the
+ driver now */
+ AS_RQ_PRESCHED, /* Debug poisoning for requests being used */
+ AS_RQ_REMOVED,
+ AS_RQ_MERGED,
+ AS_RQ_POSTSCHED, /* when they shouldn't be */
+};
+
+#define RQ_IOC(rq) ((struct io_context *) (rq)->elevator_private)
+#define RQ_STATE(rq) ((enum arq_state)(rq)->elevator_private2)
+#define RQ_SET_STATE(rq, state) ((rq)->elevator_private2 = (void *) state)
+
+static DEFINE_PER_CPU(unsigned long, ioc_count);
+static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
+
+static void as_move_to_dispatch(struct as_data *ad, struct request *rq);
+static void as_antic_stop(struct as_data *ad);
+
+/*
+ * IO Context helper functions
+ */
+
+/* Called to deallocate the as_io_context */
+static void free_as_io_context(struct as_io_context *aic)
+{
+ kfree(aic);
+ elv_ioc_count_dec(ioc_count);
+ if (ioc_gone) {
+ /*
+ * AS scheduler is exiting, grab exit lock and check
+ * the pending io context count. If it hits zero,
+ * complete ioc_gone and set it back to NULL.
+ */
+ spin_lock(&ioc_gone_lock);
+ if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+ complete(ioc_gone);
+ ioc_gone = NULL;
+ }
+ spin_unlock(&ioc_gone_lock);
+ }
+}
+
+static void as_trim(struct io_context *ioc)
+{
+ spin_lock_irq(&ioc->lock);
+ if (ioc->aic)
+ free_as_io_context(ioc->aic);
+ ioc->aic = NULL;
+ spin_unlock_irq(&ioc->lock);
+}
+
+/* Called when the task exits */
+static void exit_as_io_context(struct as_io_context *aic)
+{
+ WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
+ clear_bit(AS_TASK_RUNNING, &aic->state);
+}
+
+static struct as_io_context *alloc_as_io_context(void)
+{
+ struct as_io_context *ret;
+
+ ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+ if (ret) {
+ ret->dtor = free_as_io_context;
+ ret->exit = exit_as_io_context;
+ ret->state = 1 << AS_TASK_RUNNING;
+ atomic_set(&ret->nr_queued, 0);
+ atomic_set(&ret->nr_dispatched, 0);
+ spin_lock_init(&ret->lock);
+ ret->ttime_total = 0;
+ ret->ttime_samples = 0;
+ ret->ttime_mean = 0;
+ ret->seek_total = 0;
+ ret->seek_samples = 0;
+ ret->seek_mean = 0;
+ elv_ioc_count_inc(ioc_count);
+ }
+
+ return ret;
+}
+
+/*
+ * If the current task has no AS IO context then create one and initialise it.
+ * Then take a ref on the task's io context and return it.
+ */
+static struct io_context *as_get_io_context(int node)
+{
+ struct io_context *ioc = get_io_context(GFP_ATOMIC, node);
+ if (ioc && !ioc->aic) {
+ ioc->aic = alloc_as_io_context();
+ if (!ioc->aic) {
+ put_io_context(ioc);
+ ioc = NULL;
+ }
+ }
+ return ioc;
+}
+
+static void as_put_io_context(struct request *rq)
+{
+ struct as_io_context *aic;
+
+ if (unlikely(!RQ_IOC(rq)))
+ return;
+
+ aic = RQ_IOC(rq)->aic;
+
+ if (rq_is_sync(rq) && aic) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&aic->lock, flags);
+ set_bit(AS_TASK_IORUNNING, &aic->state);
+ aic->last_end_request = jiffies;
+ spin_unlock_irqrestore(&aic->lock, flags);
+ }
+
+ put_io_context(RQ_IOC(rq));
+}
+
+/*
+ * rb tree support functions
+ */
+#define RQ_RB_ROOT(ad, rq) (&(ad)->sort_list[rq_is_sync((rq))])
+
+static void as_add_rq_rb(struct as_data *ad, struct request *rq)
+{
+ struct request *alias;
+
+ while ((unlikely(alias = elv_rb_add(RQ_RB_ROOT(ad, rq), rq)))) {
+ as_move_to_dispatch(ad, alias);
+ as_antic_stop(ad);
+ }
+}
+
+static inline void as_del_rq_rb(struct as_data *ad, struct request *rq)
+{
+ elv_rb_del(RQ_RB_ROOT(ad, rq), rq);
+}
+
+/*
+ * IO Scheduler proper
+ */
+
+#define MAXBACK (1024 * 1024) /*
+ * Maximum distance the disk will go backward
+ * for a request.
+ */
+
+#define BACK_PENALTY 2
+
+/*
+ * as_choose_req selects the preferred one of two requests of the same data_dir
+ * ignoring time - eg. timeouts, which is the job of as_dispatch_request
+ */
+static struct request *
+as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
+{
+ int data_dir;
+ sector_t last, s1, s2, d1, d2;
+ int r1_wrap=0, r2_wrap=0; /* requests are behind the disk head */
+ const sector_t maxback = MAXBACK;
+
+ if (rq1 == NULL || rq1 == rq2)
+ return rq2;
+ if (rq2 == NULL)
+ return rq1;
+
+ data_dir = rq_is_sync(rq1);
+
+ last = ad->last_sector[data_dir];
+ s1 = rq1->sector;
+ s2 = rq2->sector;
+
+ BUG_ON(data_dir != rq_is_sync(rq2));
+
+ /*
+ * Strict one way elevator _except_ in the case where we allow
+ * short backward seeks which are biased as twice the cost of a
+ * similar forward seek.
+ */
+ if (s1 >= last)
+ d1 = s1 - last;
+ else if (s1+maxback >= last)
+ d1 = (last - s1)*BACK_PENALTY;
+ else {
+ r1_wrap = 1;
+ d1 = 0; /* shut up, gcc */
+ }
+
+ if (s2 >= last)
+ d2 = s2 - last;
+ else if (s2+maxback >= last)
+ d2 = (last - s2)*BACK_PENALTY;
+ else {
+ r2_wrap = 1;
+ d2 = 0;
+ }
+
+ /* Found required data */
+ if (!r1_wrap && r2_wrap)
+ return rq1;
+ else if (!r2_wrap && r1_wrap)
+ return rq2;
+ else if (r1_wrap && r2_wrap) {
+ /* both behind the head */
+ if (s1 <= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+
+ /* Both requests in front of the head */
+ if (d1 < d2)
+ return rq1;
+ else if (d2 < d1)
+ return rq2;
+ else {
+ if (s1 >= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+}
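+
+/*
+ * A made-up example of the arithmetic above: with the head at sector
+ * 1000, a request at 1100 costs d = 100, while one at 940 is 60 sectors
+ * behind the head and is charged 60 * BACK_PENALTY = 120, so the forward
+ * request wins even though it is physically closer to the head than the
+ * forward one is far.  Requests more than MAXBACK sectors behind the
+ * head count as "wrapped" and lose to any request still inside the
+ * window.
+ */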
+
+/*
+ * as_find_next_rq finds the next request after @prev in elevator order.
+ * This, together with as_choose_req, forms the basis for how the scheduler chooses
+ * what request to process next. Anticipation works on top of this.
+ */
+static struct request *
+as_find_next_rq(struct as_data *ad, struct request *last)
+{
+ struct rb_node *rbnext = rb_next(&last->rb_node);
+ struct rb_node *rbprev = rb_prev(&last->rb_node);
+ struct request *next = NULL, *prev = NULL;
+
+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
+
+ if (rbprev)
+ prev = rb_entry_rq(rbprev);
+
+ if (rbnext)
+ next = rb_entry_rq(rbnext);
+ else {
+ const int data_dir = rq_is_sync(last);
+
+ rbnext = rb_first(&ad->sort_list[data_dir]);
+ if (rbnext && rbnext != &last->rb_node)
+ next = rb_entry_rq(rbnext);
+ }
+
+ return as_choose_req(ad, next, prev);
+}
+
+/*
+ * anticipatory scheduling functions follow
+ */
+
+/*
+ * as_antic_expired tells us when we have anticipated too long.
+ * The funny "absolute difference" math on the elapsed time is to handle
+ * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
+ */
+static int as_antic_expired(struct as_data *ad)
+{
+ long delta_jif;
+
+ delta_jif = jiffies - ad->antic_start;
+ if (unlikely(delta_jif < 0))
+ delta_jif = -delta_jif;
+ if (delta_jif < ad->antic_expire)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * as_antic_waitnext starts anticipating that a nice request will soon be
+ * submitted. See also as_antic_waitreq
+ */
+static void as_antic_waitnext(struct as_data *ad)
+{
+ unsigned long timeout;
+
+ BUG_ON(ad->antic_status != ANTIC_OFF
+ && ad->antic_status != ANTIC_WAIT_REQ);
+
+ timeout = ad->antic_start + ad->antic_expire;
+
+ mod_timer(&ad->antic_timer, timeout);
+
+ ad->antic_status = ANTIC_WAIT_NEXT;
+}
+
+/*
+ * as_antic_waitreq starts anticipating. We don't start timing the anticipation
+ * until the request that we're anticipating on has finished. This means we
+ * are hopefully timing from when the candidate process wakes up.
+ */
+static void as_antic_waitreq(struct as_data *ad)
+{
+ BUG_ON(ad->antic_status == ANTIC_FINISHED);
+ if (ad->antic_status == ANTIC_OFF) {
+ if (!ad->io_context || ad->ioc_finished)
+ as_antic_waitnext(ad);
+ else
+ ad->antic_status = ANTIC_WAIT_REQ;
+ }
+}
+
+/*
+ * This is called directly by the functions in this file to stop anticipation.
+ * We kill the timer and schedule a call to the request_fn asap.
+ */
+static void as_antic_stop(struct as_data *ad)
+{
+ int status = ad->antic_status;
+
+ if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
+ if (status == ANTIC_WAIT_NEXT)
+ del_timer(&ad->antic_timer);
+ ad->antic_status = ANTIC_FINISHED;
+ /* see as_work_handler */
+ kblockd_schedule_work(ad->q, &ad->antic_work);
+ }
+}
+
+/*
+ * as_antic_timeout is the timer function set by as_antic_waitnext.
+ */
+static void as_antic_timeout(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *)data;
+ struct as_data *ad = q->elevator->elevator_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (ad->antic_status == ANTIC_WAIT_REQ
+ || ad->antic_status == ANTIC_WAIT_NEXT) {
+ struct as_io_context *aic;
+ spin_lock(&ad->io_context->lock);
+ aic = ad->io_context->aic;
+
+ ad->antic_status = ANTIC_FINISHED;
+ kblockd_schedule_work(q, &ad->antic_work);
+
+ if (aic->ttime_samples == 0) {
+ /* process anticipated on has exited or timed out */
+ ad->exit_prob = (7*ad->exit_prob + 256)/8;
+ }
+ if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
+ /* process not "saved" by a cooperating request */
+ ad->exit_no_coop = (7*ad->exit_no_coop + 256)/8;
+ }
+ spin_unlock(&ad->io_context->lock);
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic,
+ unsigned long ttime)
+{
+ /* fixed point: 1.0 == 1<<8 */
+ if (aic->ttime_samples == 0) {
+ ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
+ ad->new_ttime_mean = ad->new_ttime_total / 256;
+
+ ad->exit_prob = (7*ad->exit_prob)/8;
+ }
+ aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
+ aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
+ aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
+}
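+
+/*
+ * The 1<<8 fixed point above acts as an exponentially decaying average
+ * with weight 1/8 per new sample: for a constant thinktime t,
+ * ttime_samples converges towards 256 ("1.0") and ttime_total towards
+ * 256*t, so ttime_mean settles at t, while a single outlier only moves
+ * the mean by roughly 1/8 of its distance from the current value.
+ */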
+
+static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic,
+ sector_t sdist)
+{
+ u64 total;
+
+ if (aic->seek_samples == 0) {
+ ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
+ ad->new_seek_mean = ad->new_seek_total / 256;
+ }
+
+ /*
+ * Don't allow the seek distance to get too large from the
+ * odd fragment, pagein, etc
+ */
+ if (aic->seek_samples <= 60) /* second&third seek */
+ sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
+ else
+ sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*64);
+
+ aic->seek_samples = (7*aic->seek_samples + 256) / 8;
+ aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
+ total = aic->seek_total + (aic->seek_samples/2);
+ do_div(total, aic->seek_samples);
+ aic->seek_mean = (sector_t)total;
+}
+
+/*
+ * as_update_iohist keeps a decaying histogram of IO thinktimes, and
+ * updates @aic->ttime_mean based on that. It is called when a new
+ * request is queued.
+ */
+static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
+ struct request *rq)
+{
+ int data_dir = rq_is_sync(rq);
+ unsigned long thinktime = 0;
+ sector_t seek_dist;
+
+ if (aic == NULL)
+ return;
+
+ if (data_dir == REQ_SYNC) {
+ unsigned long in_flight = atomic_read(&aic->nr_queued)
+ + atomic_read(&aic->nr_dispatched);
+ spin_lock(&aic->lock);
+ if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
+ test_bit(AS_TASK_IOSTARTED, &aic->state)) {
+ /* Calculate read -> read thinktime */
+ if (test_bit(AS_TASK_IORUNNING, &aic->state)
+ && in_flight == 0) {
+ thinktime = jiffies - aic->last_end_request;
+ thinktime = min(thinktime, MAX_THINKTIME-1);
+ }
+ as_update_thinktime(ad, aic, thinktime);
+
+ /* Calculate read -> read seek distance */
+ if (aic->last_request_pos < rq->sector)
+ seek_dist = rq->sector - aic->last_request_pos;
+ else
+ seek_dist = aic->last_request_pos - rq->sector;
+ as_update_seekdist(ad, aic, seek_dist);
+ }
+ aic->last_request_pos = rq->sector + rq->nr_sectors;
+ set_bit(AS_TASK_IOSTARTED, &aic->state);
+ spin_unlock(&aic->lock);
+ }
+}
+
+/*
+ * as_close_req decides if one request is considered "close" to the
+ * previous one issued.
+ */
+static int as_close_req(struct as_data *ad, struct as_io_context *aic,
+ struct request *rq)
+{
+ unsigned long delay; /* jiffies */
+ sector_t last = ad->last_sector[ad->batch_data_dir];
+ sector_t next = rq->sector;
+ sector_t delta; /* acceptable close offset (in sectors) */
+ sector_t s;
+
+ if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
+ delay = 0;
+ else
+ delay = jiffies - ad->antic_start;
+
+ if (delay == 0)
+ delta = 8192;
+ else if (delay <= (20 * HZ / 1000) && delay <= ad->antic_expire)
+ delta = 8192 << delay;
+ else
+ return 1;
+
+ if ((last <= next + (delta>>1)) && (next <= last + delta))
+ return 1;
+
+ if (last < next)
+ s = next - last;
+ else
+ s = last - next;
+
+ if (aic->seek_samples == 0) {
+ /*
+ * Process has just started IO. Use past statistics to
+ * gauge success possibility
+ */
+ if (ad->new_seek_mean > s) {
+ /* this request is better than what we're expecting */
+ return 1;
+ }
+
+ } else {
+ if (aic->seek_mean > s) {
+ /* this request is better than what we're expecting */
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * as_can_break_anticipation returns true if we have been anticipating this
+ * request.
+ *
+ * It also returns true if the process against which we are anticipating
+ * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
+ * dispatch it ASAP, because we know that application will not be submitting
+ * any new reads.
+ *
+ * If the task which has submitted the request has exited, break anticipation.
+ *
+ * If this task has queued some other IO, do not enter anticipation.
+ */
+static int as_can_break_anticipation(struct as_data *ad, struct request *rq)
+{
+ struct io_context *ioc;
+ struct as_io_context *aic;
+
+ ioc = ad->io_context;
+ BUG_ON(!ioc);
+ spin_lock(&ioc->lock);
+
+ if (rq && ioc == RQ_IOC(rq)) {
+ /* request from same process */
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+
+ if (ad->ioc_finished && as_antic_expired(ad)) {
+ /*
+ * In this situation status should really be FINISHED,
+ * however the timer hasn't had the chance to run yet.
+ */
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+
+ aic = ioc->aic;
+ if (!aic) {
+ spin_unlock(&ioc->lock);
+ return 0;
+ }
+
+ if (atomic_read(&aic->nr_queued) > 0) {
+ /* process has more requests queued */
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+
+ if (atomic_read(&aic->nr_dispatched) > 0) {
+ /* process has more requests dispatched */
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+
+ if (rq && rq_is_sync(rq) && as_close_req(ad, aic, rq)) {
+ /*
+ * Found a close request that is not one of ours.
+ *
+ * This makes close requests from another process update
+ * our IO history. Is generally useful when there are
+ * two or more cooperating processes working in the same
+ * area.
+ */
+ if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
+ if (aic->ttime_samples == 0)
+ ad->exit_prob = (7*ad->exit_prob + 256)/8;
+
+ ad->exit_no_coop = (7*ad->exit_no_coop)/8;
+ }
+
+ as_update_iohist(ad, aic, rq);
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+
+ if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
+ /* process anticipated on has exited */
+ if (aic->ttime_samples == 0)
+ ad->exit_prob = (7*ad->exit_prob + 256)/8;
+
+ if (ad->exit_no_coop > 128) {
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+ }
+
+ if (aic->ttime_samples == 0) {
+ if (ad->new_ttime_mean > ad->antic_expire) {
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+ if (ad->exit_prob * ad->exit_no_coop > 128*256) {
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+ } else if (aic->ttime_mean > ad->antic_expire) {
+ /* the process thinks too much between requests */
+ spin_unlock(&ioc->lock);
+ return 1;
+ }
+ spin_unlock(&ioc->lock);
+ return 0;
+}
+
+/*
+ * as_can_anticipate indicates whether we should either run rq
+ * or keep anticipating a better request.
+ */
+static int as_can_anticipate(struct as_data *ad, struct request *rq)
+{
+#if 0 /* disable for now, we need to check tag level as well */
+ /*
+ * SSD device without seek penalty, disable idling
+ */
+ if (blk_queue_nonrot(ad->q))
+ return 0;
+#endif
+
+ if (!ad->io_context)
+ /*
+ * Last request submitted was a write
+ */
+ return 0;
+
+ if (ad->antic_status == ANTIC_FINISHED)
+ /*
+ * Don't restart if we have just finished. Run the next request
+ */
+ return 0;
+
+ if (as_can_break_anticipation(ad, rq))
+ /*
+ * This request is a good candidate. Don't keep anticipating,
+ * run it.
+ */
+ return 0;
+
+ /*
+ * OK from here, we haven't finished, and don't have a decent request!
+ * Status is either ANTIC_OFF so start waiting,
+ * ANTIC_WAIT_REQ so continue waiting for request to finish
+ * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
+ */
+
+ return 1;
+}
+
+/*
+ * as_update_rq must be called whenever a request (rq) is added to
+ * the sort_list. This function keeps caches up to date, and checks if the
+ * request might be one we are "anticipating"
+ */
+static void as_update_rq(struct as_data *ad, struct request *rq)
+{
+ const int data_dir = rq_is_sync(rq);
+
+ /* keep the next_rq cache up to date */
+ ad->next_rq[data_dir] = as_choose_req(ad, rq, ad->next_rq[data_dir]);
+
+ /*
+ * have we been anticipating this request?
+ * or does it come from the same process as the one we are anticipating
+ * for?
+ */
+ if (ad->antic_status == ANTIC_WAIT_REQ
+ || ad->antic_status == ANTIC_WAIT_NEXT) {
+ if (as_can_break_anticipation(ad, rq))
+ as_antic_stop(ad);
+ }
+}
+
+/*
+ * Gathers timings and resizes the write batch automatically
+ */
+static void update_write_batch(struct as_data *ad)
+{
+ unsigned long batch = ad->batch_expire[REQ_ASYNC];
+ long write_time;
+
+ write_time = (jiffies - ad->current_batch_expires) + batch;
+ if (write_time < 0)
+ write_time = 0;
+
+ if (write_time > batch && !ad->write_batch_idled) {
+ if (write_time > batch * 3)
+ ad->write_batch_count /= 2;
+ else
+ ad->write_batch_count--;
+ } else if (write_time < batch && ad->current_write_count == 0) {
+ if (batch > write_time * 3)
+ ad->write_batch_count *= 2;
+ else
+ ad->write_batch_count++;
+ }
+
+ if (ad->write_batch_count < 1)
+ ad->write_batch_count = 1;
+}
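+
+/*
+ * In other words: a batch that overran its budget by more than a factor
+ * of three has its write_batch_count halved, a smaller overrun costs one
+ * request, and a batch that used up its whole count well inside the
+ * budget grows the count again (doubling it when it finished in under a
+ * third of batch_expire[REQ_ASYNC]).
+ */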
+
+/*
+ * as_completed_request is to be called when a request has completed and
+ * returned something to the requesting process, be it an error or data.
+ */
+static void as_completed_request(struct request_queue *q, struct request *rq)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+
+ WARN_ON(!list_empty(&rq->queuelist));
+
+ if (RQ_STATE(rq) != AS_RQ_REMOVED) {
+ WARN(1, "rq->state %d\n", RQ_STATE(rq));
+ goto out;
+ }
+
+ if (ad->changed_batch && ad->nr_dispatched == 1) {
+ ad->current_batch_expires = jiffies +
+ ad->batch_expire[ad->batch_data_dir];
+ kblockd_schedule_work(q, &ad->antic_work);
+ ad->changed_batch = 0;
+
+ if (ad->batch_data_dir == REQ_SYNC)
+ ad->new_batch = 1;
+ }
+ WARN_ON(ad->nr_dispatched == 0);
+ ad->nr_dispatched--;
+
+ /*
+ * Start counting the batch from when a request of that direction is
+ * actually serviced. This should help devices with big TCQ windows
+ * and writeback caches
+ */
+ if (ad->new_batch && ad->batch_data_dir == rq_is_sync(rq)) {
+ update_write_batch(ad);
+ ad->current_batch_expires = jiffies +
+ ad->batch_expire[REQ_SYNC];
+ ad->new_batch = 0;
+ }
+
+ if (ad->io_context == RQ_IOC(rq) && ad->io_context) {
+ ad->antic_start = jiffies;
+ ad->ioc_finished = 1;
+ if (ad->antic_status == ANTIC_WAIT_REQ) {
+ /*
+ * We were waiting on this request, now anticipate
+ * the next one
+ */
+ as_antic_waitnext(ad);
+ }
+ }
+
+ as_put_io_context(rq);
+out:
+ RQ_SET_STATE(rq, AS_RQ_POSTSCHED);
+}
+
+/*
+ * as_remove_queued_request removes a request from the pre dispatch queue
+ * without updating refcounts. It is expected the caller will drop the
+ * reference unless it replaces the request at some part of the elevator
+ * (ie. the dispatch queue)
+ */
+static void as_remove_queued_request(struct request_queue *q,
+ struct request *rq)
+{
+ const int data_dir = rq_is_sync(rq);
+ struct as_data *ad = q->elevator->elevator_data;
+ struct io_context *ioc;
+
+ WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
+
+ ioc = RQ_IOC(rq);
+ if (ioc && ioc->aic) {
+ BUG_ON(!atomic_read(&ioc->aic->nr_queued));
+ atomic_dec(&ioc->aic->nr_queued);
+ }
+
+ /*
+ * Update the "next_rq" cache if we are about to remove its
+ * entry
+ */
+ if (ad->next_rq[data_dir] == rq)
+ ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
+
+ rq_fifo_clear(rq);
+ as_del_rq_rb(ad, rq);
+}
+
+/*
+ * as_fifo_expired returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. It is ratelimited so that we only perform the check once per
+ * `fifo_expire' interval. Otherwise a large number of expired requests
+ * would create a hopeless seekstorm.
+ *
+ * See as_antic_expired comment.
+ */
+static int as_fifo_expired(struct as_data *ad, int adir)
+{
+ struct request *rq;
+ long delta_jif;
+
+ delta_jif = jiffies - ad->last_check_fifo[adir];
+ if (unlikely(delta_jif < 0))
+ delta_jif = -delta_jif;
+ if (delta_jif < ad->fifo_expire[adir])
+ return 0;
+
+ ad->last_check_fifo[adir] = jiffies;
+
+ if (list_empty(&ad->fifo_list[adir]))
+ return 0;
+
+ rq = rq_entry_fifo(ad->fifo_list[adir].next);
+
+ return time_after(jiffies, rq_fifo_time(rq));
+}
+
+/*
+ * as_batch_expired returns true if the current batch has expired. A batch
+ * is a set of reads or a set of writes.
+ */
+static inline int as_batch_expired(struct as_data *ad)
+{
+ if (ad->changed_batch || ad->new_batch)
+ return 0;
+
+ if (ad->batch_data_dir == REQ_SYNC)
+ /* TODO! add a check so a complete fifo gets written? */
+ return time_after(jiffies, ad->current_batch_expires);
+
+ return time_after(jiffies, ad->current_batch_expires)
+ || ad->current_write_count == 0;
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
+{
+ const int data_dir = rq_is_sync(rq);
+
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+
+ as_antic_stop(ad);
+ ad->antic_status = ANTIC_OFF;
+
+ /*
+ * This has to be set in order to be correctly updated by
+ * as_find_next_rq
+ */
+ ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+
+ if (data_dir == REQ_SYNC) {
+ struct io_context *ioc = RQ_IOC(rq);
+ /* In case we have to anticipate after this */
+ copy_io_context(&ad->io_context, &ioc);
+ } else {
+ if (ad->io_context) {
+ put_io_context(ad->io_context);
+ ad->io_context = NULL;
+ }
+
+ if (ad->current_write_count != 0)
+ ad->current_write_count--;
+ }
+ ad->ioc_finished = 0;
+
+ ad->next_rq[data_dir] = as_find_next_rq(ad, rq);
+
+ /*
+ * take it off the sort and fifo list, add to dispatch queue
+ */
+ as_remove_queued_request(ad->q, rq);
+ WARN_ON(RQ_STATE(rq) != AS_RQ_QUEUED);
+
+ elv_dispatch_sort(ad->q, rq);
+
+ RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
+ if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+ atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
+ ad->nr_dispatched++;
+}
+
+/*
+ * as_dispatch_request selects the best request according to
+ * read/write expire, batch expire, etc, and moves it to the dispatch
+ * queue. Returns 1 if a request was found, 0 otherwise.
+ */
+static int as_dispatch_request(struct request_queue *q, int force)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+ const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
+ const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
+ struct request *rq;
+
+ if (unlikely(force)) {
+ /*
+ * Forced dispatch, accounting is useless. Reset
+ * accounting states and dump fifo_lists. Note that
+ * batch_data_dir is reset to REQ_SYNC to avoid
+ * screwing write batch accounting as write batch
+ * accounting occurs on W->R transition.
+ */
+ int dispatched = 0;
+
+ ad->batch_data_dir = REQ_SYNC;
+ ad->changed_batch = 0;
+ ad->new_batch = 0;
+
+ while (ad->next_rq[REQ_SYNC]) {
+ as_move_to_dispatch(ad, ad->next_rq[REQ_SYNC]);
+ dispatched++;
+ }
+ ad->last_check_fifo[REQ_SYNC] = jiffies;
+
+ while (ad->next_rq[REQ_ASYNC]) {
+ as_move_to_dispatch(ad, ad->next_rq[REQ_ASYNC]);
+ dispatched++;
+ }
+ ad->last_check_fifo[REQ_ASYNC] = jiffies;
+
+ return dispatched;
+ }
+
+ /* Signal that the write batch was uncontended, so we can't time it */
+ if (ad->batch_data_dir == REQ_ASYNC && !reads) {
+ if (ad->current_write_count == 0 || !writes)
+ ad->write_batch_idled = 1;
+ }
+
+ if (!(reads || writes)
+ || ad->antic_status == ANTIC_WAIT_REQ
+ || ad->antic_status == ANTIC_WAIT_NEXT
+ || ad->changed_batch)
+ return 0;
+
+ if (!(reads && writes && as_batch_expired(ad))) {
+ /*
+ * batch is still running or no reads or no writes
+ */
+ rq = ad->next_rq[ad->batch_data_dir];
+
+ if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
+ if (as_fifo_expired(ad, REQ_SYNC))
+ goto fifo_expired;
+
+ if (as_can_anticipate(ad, rq)) {
+ as_antic_waitreq(ad);
+ return 0;
+ }
+ }
+
+ if (rq) {
+ /* we have a "next request" */
+ if (reads && !writes)
+ ad->current_batch_expires =
+ jiffies + ad->batch_expire[REQ_SYNC];
+ goto dispatch_request;
+ }
+ }
+
+ /*
+ * at this point we are not running a batch. select the appropriate
+ * data direction (read / write)
+ */
+
+ if (reads) {
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_SYNC]));
+
+ if (writes && ad->batch_data_dir == REQ_SYNC)
+ /*
+ * Last batch was a read, switch to writes
+ */
+ goto dispatch_writes;
+
+ if (ad->batch_data_dir == REQ_ASYNC) {
+ WARN_ON(ad->new_batch);
+ ad->changed_batch = 1;
+ }
+ ad->batch_data_dir = REQ_SYNC;
+ rq = rq_entry_fifo(ad->fifo_list[REQ_SYNC].next);
+ ad->last_check_fifo[ad->batch_data_dir] = jiffies;
+ goto dispatch_request;
+ }
+
+ /*
+ * the last batch was a read
+ */
+
+ if (writes) {
+dispatch_writes:
+ BUG_ON(RB_EMPTY_ROOT(&ad->sort_list[REQ_ASYNC]));
+
+ if (ad->batch_data_dir == REQ_SYNC) {
+ ad->changed_batch = 1;
+
+ /*
+ * new_batch might be 1 when the queue runs out of
+ * reads. A subsequent submission of a write might
+ * cause a change of batch before the read is finished.
+ */
+ ad->new_batch = 0;
+ }
+ ad->batch_data_dir = REQ_ASYNC;
+ ad->current_write_count = ad->write_batch_count;
+ ad->write_batch_idled = 0;
+ rq = rq_entry_fifo(ad->fifo_list[REQ_ASYNC].next);
+ ad->last_check_fifo[REQ_ASYNC] = jiffies;
+ goto dispatch_request;
+ }
+
+ BUG();
+ return 0;
+
+dispatch_request:
+ /*
+ * If a request has expired, service it.
+ */
+
+ if (as_fifo_expired(ad, ad->batch_data_dir)) {
+fifo_expired:
+ rq = rq_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+ }
+
+ if (ad->changed_batch) {
+ WARN_ON(ad->new_batch);
+
+ if (ad->nr_dispatched)
+ return 0;
+
+ if (ad->batch_data_dir == REQ_ASYNC)
+ ad->current_batch_expires = jiffies +
+ ad->batch_expire[REQ_ASYNC];
+ else
+ ad->new_batch = 1;
+
+ ad->changed_batch = 0;
+ }
+
+ /*
+ * rq is the selected appropriate request.
+ */
+ as_move_to_dispatch(ad, rq);
+
+ return 1;
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void as_add_request(struct request_queue *q, struct request *rq)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+ int data_dir;
+
+ RQ_SET_STATE(rq, AS_RQ_NEW);
+
+ data_dir = rq_is_sync(rq);
+
+ rq->elevator_private = as_get_io_context(q->node);
+
+ if (RQ_IOC(rq)) {
+ as_update_iohist(ad, RQ_IOC(rq)->aic, rq);
+ atomic_inc(&RQ_IOC(rq)->aic->nr_queued);
+ }
+
+ as_add_rq_rb(ad, rq);
+
+ /*
+ * set expire time and add to fifo list
+ */
+ rq_set_fifo_time(rq, jiffies + ad->fifo_expire[data_dir]);
+ list_add_tail(&rq->queuelist, &ad->fifo_list[data_dir]);
+
+ as_update_rq(ad, rq); /* keep state machine up to date */
+ RQ_SET_STATE(rq, AS_RQ_QUEUED);
+}
+
+static void as_activate_request(struct request_queue *q, struct request *rq)
+{
+ WARN_ON(RQ_STATE(rq) != AS_RQ_DISPATCHED);
+ RQ_SET_STATE(rq, AS_RQ_REMOVED);
+ if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+ atomic_dec(&RQ_IOC(rq)->aic->nr_dispatched);
+}
+
+static void as_deactivate_request(struct request_queue *q, struct request *rq)
+{
+ WARN_ON(RQ_STATE(rq) != AS_RQ_REMOVED);
+ RQ_SET_STATE(rq, AS_RQ_DISPATCHED);
+ if (RQ_IOC(rq) && RQ_IOC(rq)->aic)
+ atomic_inc(&RQ_IOC(rq)->aic->nr_dispatched);
+}
+
+/*
+ * as_queue_empty tells us if there are requests left in the device. It may
+ * not be the case that a driver can get the next request even if the queue
+ * is not empty - it is used in the block layer to check for plugging and
+ * merging opportunities
+ */
+static int as_queue_empty(struct request_queue *q)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+
+ return list_empty(&ad->fifo_list[REQ_ASYNC])
+ && list_empty(&ad->fifo_list[REQ_SYNC]);
+}
+
+static int
+as_merge(struct request_queue *q, struct request **req, struct bio *bio)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+ sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+ struct request *__rq;
+
+ /*
+ * check for front merge
+ */
+ __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+ return ELEVATOR_NO_MERGE;
+}
+
+static void as_merged_request(struct request_queue *q, struct request *req,
+ int type)
+{
+ struct as_data *ad = q->elevator->elevator_data;
+
+ /*
+ * if the merge was a front merge, we need to reposition request
+ */
+ if (type == ELEVATOR_FRONT_MERGE) {
+ as_del_rq_rb(ad, req);
+ as_add_rq_rb(ad, req);
+ /*
+ * Note! At this stage of this and the next function, our next
+ * request may not be optimal - eg the request may have "grown"
+ * behind the disk head. We currently don't bother adjusting.
+ */
+ }
+}
+
+static void as_merged_requests(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ /*
+ * if next expires before rq, assign its expire time to arq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
+ }
+ }
+
+ /*
+ * kill knowledge of next, this one is a goner
+ */
+ as_remove_queued_request(q, next);
+ as_put_io_context(next);
+
+ RQ_SET_STATE(next, AS_RQ_MERGED);
+}
+
+/*
+ * This is executed in a "deferred" process context, by kblockd. It calls the
+ * driver's request_fn so the driver can submit that request.
+ *
+ * IMPORTANT! This guy will reenter the elevator, so set up all queue global
+ * state before calling, and don't rely on any state over calls.
+ *
+ * FIXME! dispatch queue is not a queue at all!
+ */
+static void as_work_handler(struct work_struct *work)
+{
+ struct as_data *ad = container_of(work, struct as_data, antic_work);
+ struct request_queue *q = ad->q;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queueing(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static int as_may_queue(struct request_queue *q, int rw)
+{
+ int ret = ELV_MQUEUE_MAY;
+ struct as_data *ad = q->elevator->elevator_data;
+ struct io_context *ioc;
+ if (ad->antic_status == ANTIC_WAIT_REQ ||
+ ad->antic_status == ANTIC_WAIT_NEXT) {
+ ioc = as_get_io_context(q->node);
+ if (ad->io_context == ioc)
+ ret = ELV_MQUEUE_MUST;
+ put_io_context(ioc);
+ }
+
+ return ret;
+}
+
+static void as_exit_queue(struct elevator_queue *e)
+{
+ struct as_data *ad = e->elevator_data;
+
+ del_timer_sync(&ad->antic_timer);
+ cancel_work_sync(&ad->antic_work);
+
+ BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
+ BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
+
+ put_io_context(ad->io_context);
+ kfree(ad);
+}
+
+/*
+ * initialize elevator private data (as_data).
+ */
+static void *as_init_queue(struct request_queue *q)
+{
+ struct as_data *ad;
+
+ ad = kmalloc_node(sizeof(*ad), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!ad)
+ return NULL;
+
+ ad->q = q; /* Identify what queue the data belongs to */
+
+ /* anticipatory scheduling helpers */
+ ad->antic_timer.function = as_antic_timeout;
+ ad->antic_timer.data = (unsigned long)q;
+ init_timer(&ad->antic_timer);
+ INIT_WORK(&ad->antic_work, as_work_handler);
+
+ INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
+ INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
+ ad->sort_list[REQ_SYNC] = RB_ROOT;
+ ad->sort_list[REQ_ASYNC] = RB_ROOT;
+ ad->fifo_expire[REQ_SYNC] = default_read_expire;
+ ad->fifo_expire[REQ_ASYNC] = default_write_expire;
+ ad->antic_expire = default_antic_expire;
+ ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
+ ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
+
+ ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
+ ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
+ if (ad->write_batch_count < 2)
+ ad->write_batch_count = 2;
+
+ return ad;
+}
+
+/*
+ * sysfs parts below
+ */
+
+static ssize_t
+as_var_show(unsigned int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+as_var_store(unsigned long *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtoul(p, &p, 10);
+ return count;
+}
+
+static ssize_t est_time_show(struct elevator_queue *e, char *page)
+{
+ struct as_data *ad = e->elevator_data;
+ int pos = 0;
+
+ pos += sprintf(page+pos, "%lu %% exit probability\n",
+ 100*ad->exit_prob/256);
+ pos += sprintf(page+pos, "%lu %% probability of exiting without a "
+ "cooperating process submitting IO\n",
+ 100*ad->exit_no_coop/256);
+ pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
+ pos += sprintf(page+pos, "%llu sectors new seek distance\n",
+ (unsigned long long)ad->new_seek_mean);
+
+ return pos;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct as_data *ad = e->elevator_data; \
+ return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
+}
+SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct as_data *ad = e->elevator_data; \
+ int ret = as_var_store(__PTR, (page), count); \
+ if (*(__PTR) < (MIN)) \
+ *(__PTR) = (MIN); \
+ else if (*(__PTR) > (MAX)) \
+ *(__PTR) = (MAX); \
+ *(__PTR) = msecs_to_jiffies(*(__PTR)); \
+ return ret; \
+}
+STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batch_expire_store,
+ &ad->batch_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_batch_expire_store,
+ &ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
+#undef STORE_FUNCTION
+
+#define AS_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
+
+static struct elv_fs_entry as_attrs[] = {
+ __ATTR_RO(est_time),
+ AS_ATTR(read_expire),
+ AS_ATTR(write_expire),
+ AS_ATTR(antic_expire),
+ AS_ATTR(read_batch_expire),
+ AS_ATTR(write_batch_expire),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_as = {
+ .ops = {
+ .elevator_merge_fn = as_merge,
+ .elevator_merged_fn = as_merged_request,
+ .elevator_merge_req_fn = as_merged_requests,
+ .elevator_dispatch_fn = as_dispatch_request,
+ .elevator_add_req_fn = as_add_request,
+ .elevator_activate_req_fn = as_activate_request,
+ .elevator_deactivate_req_fn = as_deactivate_request,
+ .elevator_queue_empty_fn = as_queue_empty,
+ .elevator_completed_req_fn = as_completed_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_may_queue_fn = as_may_queue,
+ .elevator_init_fn = as_init_queue,
+ .elevator_exit_fn = as_exit_queue,
+ .trim = as_trim,
+ },
+
+ .elevator_attrs = as_attrs,
+ .elevator_name = "anticipatory",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init as_init(void)
+{
+ elv_register(&iosched_as);
+
+ return 0;
+}
+
+static void __exit as_exit(void)
+{
+ DECLARE_COMPLETION_ONSTACK(all_gone);
+ elv_unregister(&iosched_as);
+ ioc_gone = &all_gone;
+ /* ioc_gone's update must be visible before reading ioc_count */
+ smp_wmb();
+ if (elv_ioc_count_read(ioc_count))
+ wait_for_completion(&all_gone);
+ synchronize_rcu();
+}
+
+module_init(as_init);
+module_exit(as_exit);
+
+MODULE_AUTHOR("Nick Piggin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/libdde-linux26/contrib/block/blk-barrier.c b/libdde-linux26/contrib/block/blk-barrier.c
new file mode 100644
index 00000000..f7dae57e
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-barrier.c
@@ -0,0 +1,419 @@
+/*
+ * Functions related to barrier IO handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q: the request queue
+ * @ordered: one of QUEUE_ORDERED_*
+ * @prepare_flush_fn: rq setup helper for cache flush ordered writes
+ *
+ * Description:
+ * For journalled file systems, doing ordered writes on a commit
+ * block instead of explicitly doing wait_on_buffer (which is bad
+ * for performance) can be a big win. Block drivers supporting this
+ * feature should call this function and indicate so.
+ *
+ **/
+int blk_queue_ordered(struct request_queue *q, unsigned ordered,
+ prepare_flush_fn *prepare_flush_fn)
+{
+ if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+ QUEUE_ORDERED_DO_POSTFLUSH))) {
+ printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
+ return -EINVAL;
+ }
+
+ if (ordered != QUEUE_ORDERED_NONE &&
+ ordered != QUEUE_ORDERED_DRAIN &&
+ ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
+ ordered != QUEUE_ORDERED_DRAIN_FUA &&
+ ordered != QUEUE_ORDERED_TAG &&
+ ordered != QUEUE_ORDERED_TAG_FLUSH &&
+ ordered != QUEUE_ORDERED_TAG_FUA) {
+ printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
+ return -EINVAL;
+ }
+
+ q->ordered = ordered;
+ q->next_ordered = ordered;
+ q->prepare_flush_fn = prepare_flush_fn;
+
+ return 0;
+}
+EXPORT_SYMBOL(blk_queue_ordered);
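+
+/*
+ * A hypothetical caller, for illustration only: a driver whose device
+ * has a write-back cache but no tagged ordering might register itself
+ * along the lines of
+ *
+ *	static void mydrv_prepare_flush(struct request_queue *q,
+ *					struct request *rq)
+ *	{
+ *		rq->cmd[0] = MYDRV_FLUSH_CACHE;
+ *	}
+ *
+ *	blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, mydrv_prepare_flush);
+ *
+ * (MYDRV_FLUSH_CACHE standing in for a device-specific cache flush
+ * opcode), while a write-through device with nothing to flush can pass
+ * QUEUE_ORDERED_DRAIN and a NULL prepare_flush_fn.
+ */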
+
+/*
+ * Cache flushing for ordered writes handling
+ */
+unsigned blk_ordered_cur_seq(struct request_queue *q)
+{
+ if (!q->ordseq)
+ return 0;
+ return 1 << ffz(q->ordseq);
+}
+
+unsigned blk_ordered_req_seq(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ BUG_ON(q->ordseq == 0);
+
+ if (rq == &q->pre_flush_rq)
+ return QUEUE_ORDSEQ_PREFLUSH;
+ if (rq == &q->bar_rq)
+ return QUEUE_ORDSEQ_BAR;
+ if (rq == &q->post_flush_rq)
+ return QUEUE_ORDSEQ_POSTFLUSH;
+
+ /*
+ * !fs requests don't need to follow barrier ordering. Always
+ * put them at the front. This fixes the following deadlock.
+ *
+ * http://thread.gmane.org/gmane.linux.kernel/537473
+ */
+ if (!blk_fs_request(rq))
+ return QUEUE_ORDSEQ_DRAIN;
+
+ if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+ (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
+ return QUEUE_ORDSEQ_DRAIN;
+ else
+ return QUEUE_ORDSEQ_DONE;
+}
+
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+{
+ struct request *rq;
+
+ if (error && !q->orderr)
+ q->orderr = error;
+
+ BUG_ON(q->ordseq & seq);
+ q->ordseq |= seq;
+
+ if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
+ return false;
+
+ /*
+ * Okay, sequence complete.
+ */
+ q->ordseq = 0;
+ rq = q->orig_bar_rq;
+
+ if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+ BUG();
+
+ return true;
+}
+
+static void pre_flush_end_io(struct request *rq, int error)
+{
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error);
+}
+
+static void bar_end_io(struct request *rq, int error)
+{
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error);
+}
+
+static void post_flush_end_io(struct request *rq, int error)
+{
+ elv_completed_request(rq->q, rq);
+ blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error);
+}
+
+static void queue_flush(struct request_queue *q, unsigned which)
+{
+ struct request *rq;
+ rq_end_io_fn *end_io;
+
+ if (which == QUEUE_ORDERED_DO_PREFLUSH) {
+ rq = &q->pre_flush_rq;
+ end_io = pre_flush_end_io;
+ } else {
+ rq = &q->post_flush_rq;
+ end_io = post_flush_end_io;
+ }
+
+ blk_rq_init(q, rq);
+ rq->cmd_flags = REQ_HARDBARRIER;
+ rq->rq_disk = q->bar_rq.rq_disk;
+ rq->end_io = end_io;
+ q->prepare_flush_fn(q, rq);
+
+ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+}
+
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
+{
+ struct request *rq = *rqp;
+ unsigned skip = 0;
+
+ q->orderr = 0;
+ q->ordered = q->next_ordered;
+ q->ordseq |= QUEUE_ORDSEQ_STARTED;
+
+ /*
+ * For an empty barrier, there's no actual BAR request, which
+ * in turn makes POSTFLUSH unnecessary. Mask them off.
+ */
+ if (!rq->hard_nr_sectors) {
+ q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+ QUEUE_ORDERED_DO_POSTFLUSH);
+ /*
+ * Empty barrier on a write-through device w/ ordered
+ * tag has no command to issue and without any command
+ * to issue, ordering by tag can't be used. Drain
+ * instead.
+ */
+ if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+ !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+ q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+ q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+ }
+ }
+
+ /* stash away the original request */
+ elv_dequeue_request(q, rq);
+ q->orig_bar_rq = rq;
+ rq = NULL;
+
+ /*
+ * Queue ordered sequence. As we stack them at the head, we
+ * need to queue in reverse order. Note that we rely on that
+ * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
+ * request gets in between the ordered sequence.
+ */
+ if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
+ queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
+ rq = &q->post_flush_rq;
+ } else
+ skip |= QUEUE_ORDSEQ_POSTFLUSH;
+
+ if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+ rq = &q->bar_rq;
+
+ /* initialize proxy request and queue it */
+ blk_rq_init(q, rq);
+ if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+ rq->cmd_flags |= REQ_RW;
+ if (q->ordered & QUEUE_ORDERED_DO_FUA)
+ rq->cmd_flags |= REQ_FUA;
+ init_request_from_bio(rq, q->orig_bar_rq->bio);
+ rq->end_io = bar_end_io;
+
+ elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+ } else
+ skip |= QUEUE_ORDSEQ_BAR;
+
+ if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+ queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
+ rq = &q->pre_flush_rq;
+ } else
+ skip |= QUEUE_ORDSEQ_PREFLUSH;
+
+ if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+ rq = NULL;
+ else
+ skip |= QUEUE_ORDSEQ_DRAIN;
+
+ *rqp = rq;
+
+ /*
+ * Complete skipped sequences. If whole sequence is complete,
+ * return false to tell elevator that this request is gone.
+ */
+ return !blk_ordered_complete_seq(q, skip, 0);
+}
+
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
+{
+ struct request *rq = *rqp;
+ const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+
+ if (!q->ordseq) {
+ if (!is_barrier)
+ return true;
+
+ if (q->next_ordered != QUEUE_ORDERED_NONE)
+ return start_ordered(q, rqp);
+ else {
+ /*
+ * Queue ordering not supported. Terminate
+ * with prejudice.
+ */
+ elv_dequeue_request(q, rq);
+ if (__blk_end_request(rq, -EOPNOTSUPP,
+ blk_rq_bytes(rq)))
+ BUG();
+ *rqp = NULL;
+ return false;
+ }
+ }
+
+ /*
+ * Ordered sequence in progress
+ */
+
+ /* Special requests are not subject to ordering rules. */
+ if (!blk_fs_request(rq) &&
+ rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
+ return true;
+
+ if (q->ordered & QUEUE_ORDERED_BY_TAG) {
+ /* Ordered by tag. Blocking the next barrier is enough. */
+ if (is_barrier && rq != &q->bar_rq)
+ *rqp = NULL;
+ } else {
+ /* Ordered by draining. Wait for turn. */
+ WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+ if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+ *rqp = NULL;
+ }
+
+ return true;
+}
+
+static void bio_end_empty_barrier(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+
+ complete(bio->bi_private);
+}
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev: blockdev to issue flush for
+ * @error_sector: error sector
+ *
+ * Description:
+ * Issue a flush for the block device in question. Caller can supply
+ * room for storing the error offset in case of a flush error, if they
+ * wish to.
+ */
+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q;
+ struct bio *bio;
+ int ret;
+
+ if (bdev->bd_disk == NULL)
+ return -ENXIO;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = bio_end_empty_barrier;
+ bio->bi_private = &wait;
+ bio->bi_bdev = bdev;
+ submit_bio(WRITE_BARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ /*
+ * The driver must store the error location in ->bi_sector, if
+ * it supports it. For non-stacked drivers, this should be copied
+ * from rq->sector.
+ */
+ if (error_sector)
+ *error_sector = bio->bi_sector;
+
+ ret = 0;
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+
+ bio_put(bio);
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_flush);
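+
+/*
+ * Illustrative use from a filesystem's fsync path, where the caller does
+ * not care about the exact failing sector:
+ *
+ *	err = blkdev_issue_flush(sb->s_bdev, NULL);
+ *	if (err == -EOPNOTSUPP)
+ *		err = 0;
+ *
+ * Devices that do not support barriers report -EOPNOTSUPP, which callers
+ * typically treat as "nothing to flush" rather than as a hard error.
+ */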
+
+static void blkdev_discard_end_io(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+
+ bio_put(bio);
+}
+
+/**
+ * blkdev_issue_discard - queue a discard
+ * @bdev: blockdev to issue discard for
+ * @sector: start sector
+ * @nr_sects: number of sectors to discard
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Issue a discard request for the sectors in question. Does not wait.
+ */
+int blkdev_issue_discard(struct block_device *bdev,
+ sector_t sector, sector_t nr_sects, gfp_t gfp_mask)
+{
+ struct request_queue *q;
+ struct bio *bio;
+ int ret = 0;
+
+ if (bdev->bd_disk == NULL)
+ return -ENXIO;
+
+ q = bdev_get_queue(bdev);
+ if (!q)
+ return -ENXIO;
+
+ if (!q->prepare_discard_fn)
+ return -EOPNOTSUPP;
+
+ while (nr_sects && !ret) {
+ bio = bio_alloc(gfp_mask, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = blkdev_discard_end_io;
+ bio->bi_bdev = bdev;
+
+ bio->bi_sector = sector;
+
+ if (nr_sects > q->max_hw_sectors) {
+ bio->bi_size = q->max_hw_sectors << 9;
+ nr_sects -= q->max_hw_sectors;
+ sector += q->max_hw_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+ bio_get(bio);
+ submit_bio(DISCARD_BARRIER, bio);
+
+ /* Check if it failed immediately */
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_discard);
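+
+/*
+ * Illustrative caller: a filesystem freeing an already-unused range of
+ * sectors might issue
+ *
+ *	err = blkdev_issue_discard(sb->s_bdev, start_sector, nr_sectors,
+ *				   GFP_NOFS);
+ *
+ * (start_sector and nr_sectors being placeholders) and simply ignore the
+ * -EOPNOTSUPP returned for queues without a prepare_discard_fn.
+ */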
diff --git a/libdde-linux26/contrib/block/blk-exec.c b/libdde-linux26/contrib/block/blk-exec.c
new file mode 100644
index 00000000..6af716d1
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-exec.c
@@ -0,0 +1,106 @@
+/*
+ * Functions related to executing prepared requests on a queue
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/*
+ * for max sense size
+ */
+#include <scsi/scsi_cmnd.h>
+
+/**
+ * blk_end_sync_rq - executes a completion event on a request
+ * @rq: request to complete
+ * @error: end I/O status of the request
+ */
+static void blk_end_sync_rq(struct request *rq, int error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ rq->end_io_data = NULL;
+ __blk_put_request(rq->q, rq);
+
+ /*
+ * complete last, if this is a stack request the process (and thus
+ * the rq pointer) could be invalid right after this complete()
+ */
+ complete(waiting);
+}
+
+/**
+ * blk_execute_rq_nowait - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution. Don't wait for completion.
+ */
+void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head,
+ rq_end_io_fn *done)
+{
+ int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+
+ rq->rq_disk = bd_disk;
+ rq->cmd_flags |= REQ_NOMERGE;
+ rq->end_io = done;
+ WARN_ON(irqs_disabled());
+ spin_lock_irq(q->queue_lock);
+ __elv_add_request(q, rq, where, 1);
+ __generic_unplug_device(q);
+ /* the queue is stopped so it won't be plugged+unplugged */
+ if (blk_pm_resume_request(rq))
+ q->request_fn(q);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
+/**
+ * blk_execute_rq - insert a request into queue for execution
+ * @q: queue to insert the request in
+ * @bd_disk: matching gendisk
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution and wait for completion.
+ */
+int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
+ struct request *rq, int at_head)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ char sense[SCSI_SENSE_BUFFERSIZE];
+ int err = 0;
+
+ /*
+ * we need an extra reference to the request, so we can look at
+ * it after io completion
+ */
+ rq->ref_count++;
+
+ if (!rq->sense) {
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ }
+
+ rq->end_io_data = &wait;
+ blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
+ wait_for_completion(&wait);
+
+ if (rq->errors)
+ err = -EIO;
+
+ return err;
+}
+EXPORT_SYMBOL(blk_execute_rq);
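+
+/*
+ * Sketch of a typical synchronous passthrough caller, simplified (cmd[]
+ * stands for a caller-supplied SCSI CDB, bd_disk for the target gendisk):
+ *
+ *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
+ *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ *	rq->cmd_len = COMMAND_SIZE(cmd[0]);
+ *	memcpy(rq->cmd, cmd, rq->cmd_len);
+ *	rq->timeout = 60 * HZ;
+ *	err = blk_execute_rq(q, bd_disk, rq, 0);
+ *	blk_put_request(rq);
+ */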
diff --git a/libdde-linux26/contrib/block/blk-ioc.c b/libdde-linux26/contrib/block/blk-ioc.c
new file mode 100644
index 00000000..e7235861
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-ioc.c
@@ -0,0 +1,182 @@
+/*
+ * Functions related to io context handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+
+#include "blk.h"
+
+#include <ddekit/timer.h>
+
+/*
+ * For io context allocations
+ */
+static struct kmem_cache *iocontext_cachep;
+
+static void cfq_dtor(struct io_context *ioc)
+{
+ if (!hlist_empty(&ioc->cic_list)) {
+ struct cfq_io_context *cic;
+
+ cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+ cic_list);
+ cic->dtor(ioc);
+ }
+}
+
+/*
+ * IO Context helper functions. put_io_context() returns 1 if there are no
+ * more users of this io context, 0 otherwise.
+ */
+int put_io_context(struct io_context *ioc)
+{
+ if (ioc == NULL)
+ return 1;
+
+ BUG_ON(atomic_read(&ioc->refcount) == 0);
+
+ if (atomic_dec_and_test(&ioc->refcount)) {
+ rcu_read_lock();
+ if (ioc->aic && ioc->aic->dtor)
+ ioc->aic->dtor(ioc->aic);
+ cfq_dtor(ioc);
+ rcu_read_unlock();
+
+ kmem_cache_free(iocontext_cachep, ioc);
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(put_io_context);
+
+static void cfq_exit(struct io_context *ioc)
+{
+ rcu_read_lock();
+
+ if (!hlist_empty(&ioc->cic_list)) {
+ struct cfq_io_context *cic;
+
+ cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+ cic_list);
+ cic->exit(ioc);
+ }
+ rcu_read_unlock();
+}
+
+/* Called by the exiting task */
+void exit_io_context(void)
+{
+ struct io_context *ioc;
+
+ task_lock(current);
+ ioc = current->io_context;
+ current->io_context = NULL;
+ task_unlock(current);
+
+ if (atomic_dec_and_test(&ioc->nr_tasks)) {
+ if (ioc->aic && ioc->aic->exit)
+ ioc->aic->exit(ioc->aic);
+ cfq_exit(ioc);
+
+ put_io_context(ioc);
+ }
+}
+
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
+{
+ struct io_context *ret;
+
+ ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
+ if (ret) {
+ atomic_set(&ret->refcount, 1);
+ atomic_set(&ret->nr_tasks, 1);
+ spin_lock_init(&ret->lock);
+ ret->ioprio_changed = 0;
+ ret->ioprio = 0;
+ ret->last_waited = jiffies; /* doesn't matter... */
+ ret->nr_batch_requests = 0; /* because this is 0 */
+ ret->aic = NULL;
+ INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
+ INIT_HLIST_HEAD(&ret->cic_list);
+ ret->ioc_data = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * Otherwise, return its existing IO context.
+ *
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
+ */
+struct io_context *current_io_context(gfp_t gfp_flags, int node)
+{
+ struct task_struct *tsk = current;
+ struct io_context *ret;
+
+ ret = tsk->io_context;
+ if (likely(ret))
+ return ret;
+
+ ret = alloc_io_context(gfp_flags, node);
+ if (ret) {
+ /* make sure set_task_ioprio() sees the settings above */
+ smp_wmb();
+ tsk->io_context = ret;
+ }
+
+ return ret;
+}
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(gfp_t gfp_flags, int node)
+{
+ struct io_context *ret = NULL;
+
+ /*
+ * Check for unlikely race with exiting task. ioc ref count is
+ * zero when ioc is being detached.
+ */
+ do {
+ ret = current_io_context(gfp_flags, node);
+ if (unlikely(!ret))
+ break;
+ } while (!atomic_inc_not_zero(&ret->refcount));
+
+ return ret;
+}
+EXPORT_SYMBOL(get_io_context);
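+
+/*
+ * Editorial usage sketch, not part of the original file: a caller that
+ * wants to inspect the submitting task's io_context takes a reference
+ * with get_io_context() and drops it with put_io_context(). The node
+ * value -1 simply means "any node".
+ */
+#if 0
+static void example_peek_io_context(void)
+{
+	struct io_context *ioc = get_io_context(GFP_NOIO, -1);
+
+	if (ioc) {
+		/* e.g. look at ioc->ioprio or ioc->last_waited here */
+		put_io_context(ioc);
+	}
+}
+#endif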
+
+void copy_io_context(struct io_context **pdst, struct io_context **psrc)
+{
+ struct io_context *src = *psrc;
+ struct io_context *dst = *pdst;
+
+ if (src) {
+ BUG_ON(atomic_read(&src->refcount) == 0);
+ atomic_inc(&src->refcount);
+ put_io_context(dst);
+ *pdst = src;
+ }
+}
+EXPORT_SYMBOL(copy_io_context);
+
+static int __init blk_ioc_init(void)
+{
+ iocontext_cachep = kmem_cache_create("blkdev_ioc",
+ sizeof(struct io_context), 0, SLAB_PANIC, NULL);
+ return 0;
+}
+subsys_initcall(blk_ioc_init);
diff --git a/libdde-linux26/contrib/block/blk-merge.c b/libdde-linux26/contrib/block/blk-merge.c
new file mode 100644
index 00000000..5a244f05
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-merge.c
@@ -0,0 +1,428 @@
+/*
+ * Functions related to segment and merge handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/scatterlist.h>
+
+#include "blk.h"
+
+void blk_recalc_rq_sectors(struct request *rq, int nsect)
+{
+ if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+ rq->hard_sector += nsect;
+ rq->hard_nr_sectors -= nsect;
+
+ /*
+ * Move the I/O submission pointers ahead if required.
+ */
+ if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+ (rq->sector <= rq->hard_sector)) {
+ rq->sector = rq->hard_sector;
+ rq->nr_sectors = rq->hard_nr_sectors;
+ rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+ rq->current_nr_sectors = rq->hard_cur_sectors;
+ rq->buffer = bio_data(rq->bio);
+ }
+
+ /*
+ * if total number of sectors is less than the first segment
+ * size, something has gone terribly wrong
+ */
+ if (rq->nr_sectors < rq->current_nr_sectors) {
+ printk(KERN_ERR "blk: request botched\n");
+ rq->nr_sectors = rq->current_nr_sectors;
+ }
+ }
+}
+
+static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
+ struct bio *bio)
+{
+ unsigned int phys_size;
+ struct bio_vec *bv, *bvprv = NULL;
+ int cluster, i, high, highprv = 1;
+ unsigned int seg_size, nr_phys_segs;
+ struct bio *fbio, *bbio;
+
+ if (!bio)
+ return 0;
+
+ fbio = bio;
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+ seg_size = 0;
+ phys_size = nr_phys_segs = 0;
+ for_each_bio(bio) {
+ bio_for_each_segment(bv, bio, i) {
+ /*
+ * the trick here is making sure that a high page is
+ * never considered part of another segment, since that
+ * might change with the bounce page.
+ */
+ high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+ if (high || highprv)
+ goto new_segment;
+ if (cluster) {
+ if (seg_size + bv->bv_len > q->max_segment_size)
+ goto new_segment;
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+ goto new_segment;
+
+ seg_size += bv->bv_len;
+ bvprv = bv;
+ continue;
+ }
+new_segment:
+ if (nr_phys_segs == 1 && seg_size >
+ fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
+
+ nr_phys_segs++;
+ bvprv = bv;
+ seg_size = bv->bv_len;
+ highprv = high;
+ }
+ bbio = bio;
+ }
+
+ if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
+ fbio->bi_seg_front_size = seg_size;
+ if (seg_size > bbio->bi_seg_back_size)
+ bbio->bi_seg_back_size = seg_size;
+
+ return nr_phys_segs;
+}
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+ rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
+}
+
+void blk_recount_segments(struct request_queue *q, struct bio *bio)
+{
+ struct bio *nxt = bio->bi_next;
+
+ bio->bi_next = NULL;
+ bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
+ bio->bi_next = nxt;
+ bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
+ struct bio *nxt)
+{
+ if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+ return 0;
+
+ if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+ q->max_segment_size)
+ return 0;
+
+ if (!bio_has_data(bio))
+ return 1;
+
+ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ return 0;
+
+ /*
+ * bio and nxt are contiguous in memory; check if the queue allows
+ * these two to be merged into one
+ */
+ if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct bio_vec *bvec, *bvprv;
+ struct req_iterator iter;
+ struct scatterlist *sg;
+ int nsegs, cluster;
+
+ nsegs = 0;
+ cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+
+ /*
+ * for each bio in rq
+ */
+ bvprv = NULL;
+ sg = NULL;
+ rq_for_each_segment(bvec, rq, iter) {
+ int nbytes = bvec->bv_len;
+
+ if (bvprv && cluster) {
+ if (sg->length + nbytes > q->max_segment_size)
+ goto new_segment;
+
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+ goto new_segment;
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ goto new_segment;
+
+ sg->length += nbytes;
+ } else {
+new_segment:
+ if (!sg)
+ sg = sglist;
+ else {
+ /*
+ * If the driver previously mapped a shorter
+ * list, we could see a termination bit
+ * prematurely unless it fully inits the sg
+ * table on each mapping. We KNOW that there
+ * must be more entries here or the driver
+ * would be buggy, so force clear the
+ * termination bit to avoid doing a full
+ * sg_init_table() in drivers for each command.
+ */
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ }
+
+ sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
+ nsegs++;
+ }
+ bvprv = bvec;
+ } /* segments in rq */
+
+
+ if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+ (rq->data_len & q->dma_pad_mask)) {
+ unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+ sg->length += pad_len;
+ rq->extra_len += pad_len;
+ }
+
+ if (q->dma_drain_size && q->dma_drain_needed(rq)) {
+ if (rq->cmd_flags & REQ_RW)
+ memset(q->dma_drain_buffer, 0, q->dma_drain_size);
+
+ sg->page_link &= ~0x02;
+ sg = sg_next(sg);
+ sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+ q->dma_drain_size,
+ ((unsigned long)q->dma_drain_buffer) &
+ (PAGE_SIZE - 1));
+ nsegs++;
+ rq->extra_len += q->dma_drain_size;
+ }
+
+ if (sg)
+ sg_mark_end(sg);
+
+ return nsegs;
+}
+EXPORT_SYMBOL(blk_rq_map_sg);
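+
+/*
+ * Editorial usage sketch, not part of the original file: a request_fn
+ * style driver maps a dequeued request onto its scatterlist before
+ * handing it to the hardware. EXAMPLE_MAX_SEGS is a hypothetical limit
+ * that would have to match the queue's segment settings.
+ */
+#if 0
+#define EXAMPLE_MAX_SEGS	128
+
+static int example_map_request(struct request_queue *q, struct request *rq,
+			       struct scatterlist *sgl)
+{
+	int nsegs;
+
+	sg_init_table(sgl, EXAMPLE_MAX_SEGS);
+	nsegs = blk_rq_map_sg(q, rq, sgl);	/* <= rq->nr_phys_segments */
+
+	/* now dma_map_sg() sgl[0..nsegs-1] and program the controller */
+	return nsegs;
+}
+#endif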
+
+static inline int ll_new_hw_segment(struct request_queue *q,
+ struct request *req,
+ struct bio *bio)
+{
+ int nr_phys_segs = bio_phys_segments(q, bio);
+
+ if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
+ || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+ }
+
+ /*
+ * This will form the start of a new hw segment. Bump the
+ * physical segment counter.
+ */
+ req->nr_phys_segments += nr_phys_segs;
+ return 1;
+}
+
+int ll_back_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ unsigned short max_sectors;
+
+ if (unlikely(blk_pc_request(req)))
+ max_sectors = q->max_hw_sectors;
+ else
+ max_sectors = q->max_sectors;
+
+ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+ }
+ if (!bio_flagged(req->biotail, BIO_SEG_VALID))
+ blk_recount_segments(q, req->biotail);
+ if (!bio_flagged(bio, BIO_SEG_VALID))
+ blk_recount_segments(q, bio);
+
+ return ll_new_hw_segment(q, req, bio);
+}
+
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio)
+{
+ unsigned short max_sectors;
+
+ if (unlikely(blk_pc_request(req)))
+ max_sectors = q->max_hw_sectors;
+ else
+ max_sectors = q->max_sectors;
+
+
+ if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+ req->cmd_flags |= REQ_NOMERGE;
+ if (req == q->last_merge)
+ q->last_merge = NULL;
+ return 0;
+ }
+ if (!bio_flagged(bio, BIO_SEG_VALID))
+ blk_recount_segments(q, bio);
+ if (!bio_flagged(req->bio, BIO_SEG_VALID))
+ blk_recount_segments(q, req->bio);
+
+ return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ int total_phys_segments;
+ unsigned int seg_size =
+ req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
+
+ /*
+ * First check whether either of the requests is a re-queued
+ * request. We can't merge them if so.
+ */
+ if (req->special || next->special)
+ return 0;
+
+ /*
+ * Will it become too large?
+ */
+ if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ return 0;
+
+ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+ if (req->nr_phys_segments == 1)
+ req->bio->bi_seg_front_size = seg_size;
+ if (next->nr_phys_segments == 1)
+ next->biotail->bi_seg_back_size = seg_size;
+ total_phys_segments--;
+ }
+
+ if (total_phys_segments > q->max_phys_segments)
+ return 0;
+
+ if (total_phys_segments > q->max_hw_segments)
+ return 0;
+
+ /* Merge is OK... */
+ req->nr_phys_segments = total_phys_segments;
+ return 1;
+}
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static int attempt_merge(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ if (!rq_mergeable(req) || !rq_mergeable(next))
+ return 0;
+
+ /*
+ * not contiguous
+ */
+ if (req->sector + req->nr_sectors != next->sector)
+ return 0;
+
+ if (rq_data_dir(req) != rq_data_dir(next)
+ || req->rq_disk != next->rq_disk
+ || next->special)
+ return 0;
+
+ if (blk_integrity_rq(req) != blk_integrity_rq(next))
+ return 0;
+
+ /*
+ * If we are allowed to merge, then append bio list
+ * from next to rq and release next. merge_requests_fn
+ * will have updated segment counts, update sector
+ * counts here.
+ */
+ if (!ll_merge_requests_fn(q, req, next))
+ return 0;
+
+ /*
+ * At this point we have either done a back merge
+ * or front merge. We need the smaller start_time of
+ * the merged requests to be the current request
+ * for accounting purposes.
+ */
+ if (time_after(req->start_time, next->start_time))
+ req->start_time = next->start_time;
+
+ req->biotail->bi_next = next->bio;
+ req->biotail = next->biotail;
+
+ req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+
+ elv_merge_requests(q, req, next);
+
+ if (req->rq_disk) {
+ struct hd_struct *part;
+ int cpu;
+
+ cpu = part_stat_lock();
+ part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+ part_round_stats(cpu, part);
+ part_dec_in_flight(part);
+
+ part_stat_unlock();
+ }
+
+ req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+ if (blk_rq_cpu_valid(next))
+ req->cpu = next->cpu;
+
+ __blk_put_request(q, next);
+ return 1;
+}
+
+int attempt_back_merge(struct request_queue *q, struct request *rq)
+{
+ struct request *next = elv_latter_request(q, rq);
+
+ if (next)
+ return attempt_merge(q, rq, next);
+
+ return 0;
+}
+
+int attempt_front_merge(struct request_queue *q, struct request *rq)
+{
+ struct request *prev = elv_former_request(q, rq);
+
+ if (prev)
+ return attempt_merge(q, prev, rq);
+
+ return 0;
+}
diff --git a/libdde-linux26/contrib/block/blk-settings.c b/libdde-linux26/contrib/block/blk-settings.c
new file mode 100644
index 00000000..61510191
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-settings.c
@@ -0,0 +1,474 @@
+/*
+ * Functions related to setting various queue properties from drivers
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
+
+#include "blk.h"
+
+#include <ddekit/timer.h>
+
+unsigned long blk_max_low_pfn;
+EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
+
+/**
+ * blk_queue_prep_rq - set a prepare_request function for queue
+ * @q: queue
+ * @pfn: prepare_request function
+ *
+ * It's possible for a queue to register a prepare_request callback which
+ * is invoked before the request is handed to the request_fn. The goal of
+ * the function is to prepare a request for I/O, it can be used to build a
+ * cdb from the request data for instance.
+ *
+ */
+void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
+{
+ q->prep_rq_fn = pfn;
+}
+EXPORT_SYMBOL(blk_queue_prep_rq);
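+
+/*
+ * Editorial usage sketch, not part of the original file: a driver-side
+ * prepare_request callback that builds a 10-byte READ/WRITE cdb before
+ * the request reaches the request_fn. The command encoding is purely
+ * illustrative.
+ */
+#if 0
+static int example_prep_rq(struct request_queue *q, struct request *rq)
+{
+	if (!blk_fs_request(rq))
+		return BLKPREP_KILL;	/* only plain fs requests handled */
+
+	rq->cmd_len = 10;
+	rq->cmd[0] = rq_data_dir(rq) == READ ? 0x28 : 0x2a;
+	rq->cmd[2] = (rq->sector >> 24) & 0xff;
+	rq->cmd[3] = (rq->sector >> 16) & 0xff;
+	rq->cmd[4] = (rq->sector >> 8) & 0xff;
+	rq->cmd[5] = rq->sector & 0xff;
+	rq->cmd[7] = (rq->nr_sectors >> 8) & 0xff;
+	rq->cmd[8] = rq->nr_sectors & 0xff;
+	return BLKPREP_OK;
+}
+
+/* registered during queue setup:  blk_queue_prep_rq(q, example_prep_rq); */
+#endif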
+
+/**
+ * blk_queue_set_discard - set a discard_sectors function for queue
+ * @q: queue
+ * @dfn: prepare_discard function
+ *
+ * It's possible for a queue to register a discard callback which is used
+ * to transform a discard request into the appropriate type for the
+ * hardware. If none is registered, then discard requests are failed
+ * with %EOPNOTSUPP.
+ *
+ */
+void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn)
+{
+ q->prepare_discard_fn = dfn;
+}
+EXPORT_SYMBOL(blk_queue_set_discard);
+
+/**
+ * blk_queue_merge_bvec - set a merge_bvec function for queue
+ * @q: queue
+ * @mbfn: merge_bvec_fn
+ *
+ * Usually queues have static limitations on the max sectors or segments that
+ * we can put in a request. Stacking drivers may have some settings that
+ * are dynamic, and thus we have to query the queue whether it is ok to
+ * add a new bio_vec to a bio at a given offset or not. If the block device
+ * has such limitations, it needs to register a merge_bvec_fn to control
+ * the size of bio's sent to it. Note that a block device *must* allow a
+ * single page to be added to an empty bio. The block device driver may want
+ * to use the bio_split() function to deal with these bio's. By default
+ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+ * honored.
+ */
+void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
+{
+ q->merge_bvec_fn = mbfn;
+}
+EXPORT_SYMBOL(blk_queue_merge_bvec);
+
+void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
+{
+ q->softirq_done_fn = fn;
+}
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
+void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
+{
+ q->rq_timeout = timeout;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
+
+void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
+{
+ q->rq_timed_out_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
+
+void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
+{
+ q->lld_busy_fn = fn;
+}
+EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q: the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ * The normal way for &struct bios to be passed to a device
+ * driver is for them to be collected into requests on a request
+ * queue, and then to allow the device driver to select requests
+ * off that queue when it is ready. This works well for many block
+ * devices. However some block devices (typically virtual devices
+ * such as md or lvm) do not benefit from the processing on the
+ * request queue, and are served best by having the requests passed
+ * directly to them. This can be achieved by providing a function
+ * to blk_queue_make_request().
+ *
+ * Caveat:
+ * The driver that does this *must* be able to deal appropriately
+ * with buffers in "highmemory". This can be accomplished by either calling
+ * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ * blk_queue_bounce() to create a buffer in normal memory.
+ **/
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
+{
+ /*
+ * set defaults
+ */
+ q->nr_requests = BLKDEV_MAX_RQ;
+ blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+ blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+ blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+ blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
+ q->make_request_fn = mfn;
+ q->backing_dev_info.ra_pages =
+ (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ q->backing_dev_info.state = 0;
+ q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+ blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+ blk_queue_hardsect_size(q, 512);
+ blk_queue_dma_alignment(q, 511);
+ blk_queue_congestion_threshold(q);
+ q->nr_batching = BLK_BATCH_REQ;
+
+ q->unplug_thresh = 4; /* hmm */
+ q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */
+ if (q->unplug_delay == 0)
+ q->unplug_delay = 1;
+
+ q->unplug_timer.function = blk_unplug_timeout;
+ q->unplug_timer.data = (unsigned long)q;
+
+ /*
+ * by default assume old behaviour and bounce for any highmem page
+ */
+ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+}
+EXPORT_SYMBOL(blk_queue_make_request);
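+
+/*
+ * Editorial usage sketch, not part of the original file: the minimal shape
+ * of a bio-based (virtual/stacking) driver that bypasses the request queue
+ * entirely, as described above. All names are hypothetical.
+ */
+#if 0
+static int example_make_request(struct request_queue *q, struct bio *bio)
+{
+	/* service @bio directly here, then signal completion */
+	bio_endio(bio, 0);
+	return 0;
+}
+
+static struct request_queue *example_setup_queue(void)
+{
+	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
+
+	if (q)
+		blk_queue_make_request(q, example_make_request);
+	return q;
+}
+#endif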
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q: the request queue for the device
+ * @dma_addr: bus address limit
+ *
+ * Description:
+ * Different hardware can have different requirements as to what pages
+ * it can do I/O directly to. A low level driver can call
+ * blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ * buffers for doing I/O to pages residing above @dma_addr.
+ **/
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+{
+ unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+ int dma = 0;
+
+ q->bounce_gfp = GFP_NOIO;
+#if BITS_PER_LONG == 64
+ /* Assume anything <= 4GB can be handled by IOMMU.
+ Actually some IOMMUs can handle everything, but I don't
+ know of a way to test this here. */
+ if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ dma = 1;
+ q->bounce_pfn = max_low_pfn;
+#else
+ if (b_pfn < blk_max_low_pfn)
+ dma = 1;
+ q->bounce_pfn = b_pfn;
+#endif
+ if (dma) {
+ init_emergency_isa_pool();
+ q->bounce_gfp = GFP_NOIO | GFP_DMA;
+ q->bounce_pfn = b_pfn;
+ }
+}
+EXPORT_SYMBOL(blk_queue_bounce_limit);
+
+/**
+ * blk_queue_max_sectors - set max sectors for a request for this queue
+ * @q: the request queue for the device
+ * @max_sectors: max sectors in the usual 512b unit
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the size of
+ * received requests.
+ **/
+void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+ if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
+ max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_sectors);
+ }
+
+ if (BLK_DEF_MAX_SECTORS > max_sectors)
+ q->max_hw_sectors = q->max_sectors = max_sectors;
+ else {
+ q->max_sectors = BLK_DEF_MAX_SECTORS;
+ q->max_hw_sectors = max_sectors;
+ }
+}
+EXPORT_SYMBOL(blk_queue_max_sectors);
+
+/**
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * physical data segments in a request. This would be the largest sized
+ * scatter list the driver could handle.
+ **/
+void blk_queue_max_phys_segments(struct request_queue *q,
+ unsigned short max_segments)
+{
+ if (!max_segments) {
+ max_segments = 1;
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
+ }
+
+ q->max_phys_segments = max_segments;
+}
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * hw data segments in a request. This would be the largest number of
+ * address/length pairs the host adapter can actually give at once
+ * to the device.
+ **/
+void blk_queue_max_hw_segments(struct request_queue *q,
+ unsigned short max_segments)
+{
+ if (!max_segments) {
+ max_segments = 1;
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_segments);
+ }
+
+ q->max_hw_segments = max_segments;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
+
+/**
+ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+ * @q: the request queue for the device
+ * @max_size: max size of segment in bytes
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the size of a
+ * coalesced segment
+ **/
+void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
+{
+ if (max_size < PAGE_CACHE_SIZE) {
+ max_size = PAGE_CACHE_SIZE;
+ printk(KERN_INFO "%s: set to minimum %d\n",
+ __func__, max_size);
+ }
+
+ q->max_segment_size = max_size;
+}
+EXPORT_SYMBOL(blk_queue_max_segment_size);
+
+/**
+ * blk_queue_hardsect_size - set hardware sector size for the queue
+ * @q: the request queue for the device
+ * @size: the hardware sector size, in bytes
+ *
+ * Description:
+ * This should typically be set to the lowest possible sector size
+ * that the hardware can operate on (possible without reverting to
+ * even internal read-modify-write operations). Usually the default
+ * of 512 covers most hardware.
+ **/
+void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+{
+ q->hardsect_size = size;
+}
+EXPORT_SYMBOL(blk_queue_hardsect_size);
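+
+/*
+ * Editorial usage sketch, not part of the original file: typical queue
+ * limit setup in a request_fn based driver, combining the helpers above.
+ * The specific numbers are illustrative, not recommendations.
+ */
+#if 0
+static void example_set_limits(struct request_queue *q)
+{
+	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	/* 32-bit DMA only */
+	blk_queue_max_sectors(q, 256);			/* 128 KiB per request */
+	blk_queue_max_phys_segments(q, 64);
+	blk_queue_max_hw_segments(q, 64);
+	blk_queue_max_segment_size(q, 65536);
+	blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);
+}
+#endif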
+
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
+
+/**
+ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+ * @t: the stacking driver (top)
+ * @b: the underlying device (bottom)
+ **/
+void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+{
+ /* zero is "infinity" */
+ t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+ t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
+
+ t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
+ t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
+ t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
+ t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+ if (!t->queue_lock)
+ WARN_ON_ONCE(1);
+ else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
+ unsigned long flags;
+ spin_lock_irqsave(t->queue_lock, flags);
+ queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
+ spin_unlock_irqrestore(t->queue_lock, flags);
+ }
+}
+EXPORT_SYMBOL(blk_queue_stack_limits);
+
+/**
+ * blk_queue_dma_pad - set pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set dma pad mask.
+ *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_update_dma_pad - update pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Update dma pad mask.
+ *
+ * Appending pad buffer to a request modifies the last entry of a
+ * scatter list such that it includes the pad buffer.
+ **/
+void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ if (mask > q->dma_pad_mask)
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
+ * @buf: physically contiguous buffer
+ * @size: size of the buffer in bytes
+ *
+ * Some devices have excess DMA problems and can't simply discard (or
+ * zero fill) the unwanted piece of the transfer. They have to have a
+ * real area of memory to transfer it into. The use case for this is
+ * ATAPI devices in DMA mode. If the packet command causes a transfer
+ * bigger than the transfer size some HBAs will lock up if there
+ * aren't DMA elements to contain the excess transfer. What this API
+ * does is adjust the queue so that the buf is always appended
+ * silently to the scatterlist.
+ *
+ * Note: This routine adjusts max_hw_segments to make room for
+ * appending the drain buffer. If you call
+ * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
+ * calling this routine, you must set the limit to one fewer than your
+ * device can support, otherwise there won't be room for the drain
+ * buffer.
+ */
+int blk_queue_dma_drain(struct request_queue *q,
+ dma_drain_needed_fn *dma_drain_needed,
+ void *buf, unsigned int size)
+{
+ if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+ return -EINVAL;
+ /* make room for appending the drain */
+ --q->max_hw_segments;
+ --q->max_phys_segments;
+ q->dma_drain_needed = dma_drain_needed;
+ q->dma_drain_buffer = buf;
+ q->dma_drain_size = size;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
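+
+/*
+ * Editorial usage sketch, not part of the original file: how an ATAPI-style
+ * driver might attach a drain buffer. The "packet commands only" test and
+ * the 4 KiB buffer size are hypothetical.
+ */
+#if 0
+static int example_drain_needed(struct request *rq)
+{
+	return blk_pc_request(rq);
+}
+
+static int example_attach_drain(struct request_queue *q)
+{
+	void *buf = kmalloc(4096, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+	return blk_queue_dma_drain(q, example_drain_needed, buf, 4096);
+}
+#endif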
+
+/**
+ * blk_queue_segment_boundary - set boundary rules for segment merging
+ * @q: the request queue for the device
+ * @mask: the memory boundary mask
+ **/
+void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
+{
+ if (mask < PAGE_CACHE_SIZE - 1) {
+ mask = PAGE_CACHE_SIZE - 1;
+ printk(KERN_INFO "%s: set to minimum %lx\n",
+ __func__, mask);
+ }
+
+ q->seg_boundary_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_segment_boundary);
+
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * Description:
+ * Set required memory and length alignment for direct dma transactions.
+ * This is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(struct request_queue *q, int mask)
+{
+ q->dma_alignment = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_alignment);
+
+/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q: the request queue for the device
+ * @mask: alignment mask
+ *
+ * Description:
+ * Update required memory and length alignment for direct dma transactions.
+ * If the requested alignment is larger than the current alignment, then
+ * the current queue alignment is updated to the new value, otherwise it
+ * is left alone. The design of this is to allow multiple objects
+ * (driver, device, transport etc) to set their respective
+ * alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+ BUG_ON(mask > PAGE_SIZE);
+
+ if (mask > q->dma_alignment)
+ q->dma_alignment = mask;
+}
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
+static int __init blk_settings_init(void)
+{
+ blk_max_low_pfn = max_low_pfn - 1;
+ blk_max_pfn = max_pfn - 1;
+ return 0;
+}
+subsys_initcall(blk_settings_init);
diff --git a/libdde-linux26/contrib/block/blk-softirq.c b/libdde-linux26/contrib/block/blk-softirq.c
new file mode 100644
index 00000000..ce0efc6b
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-softirq.c
@@ -0,0 +1,175 @@
+/*
+ * Functions related to softirq rq completions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include "blk.h"
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+ struct list_head *cpu_list, local_list;
+
+ local_irq_disable();
+ cpu_list = &__get_cpu_var(blk_cpu_done);
+ list_replace_init(cpu_list, &local_list);
+ local_irq_enable();
+
+ while (!list_empty(&local_list)) {
+ struct request *rq;
+
+ rq = list_entry(local_list.next, struct request, csd.list);
+ list_del_init(&rq->csd.list);
+ rq->q->softirq_done_fn(rq);
+ }
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+static void trigger_softirq(void *data)
+{
+ struct request *rq = data;
+ unsigned long flags;
+ struct list_head *list;
+
+ local_irq_save(flags);
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&rq->csd.list, list);
+
+ if (list->next == &rq->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Setup and invoke a run of 'trigger_softirq' on the given cpu.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ if (cpu_online(cpu)) {
+ struct call_single_data *data = &rq->csd;
+
+ data->func = trigger_softirq;
+ data->info = rq;
+ data->flags = 0;
+
+ __smp_call_function_single(cpu, data);
+ return 0;
+ }
+
+ return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+ return 1;
+}
+#endif
+
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ /*
+ * If a CPU goes away, splice its entries to the current CPU
+ * and trigger a run of the softirq
+ */
+ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+ int cpu = (unsigned long) hcpu;
+
+ local_irq_disable();
+ list_splice_init(&per_cpu(blk_cpu_done, cpu),
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
+ .notifier_call = blk_cpu_notify,
+};
+
+void __blk_complete_request(struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long flags;
+ int ccpu, cpu, group_cpu;
+
+ BUG_ON(!q->softirq_done_fn);
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ group_cpu = blk_cpu_to_group(cpu);
+
+ /*
+ * Select completion CPU
+ */
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+ ccpu = req->cpu;
+ else
+ ccpu = cpu;
+
+ if (ccpu == cpu || ccpu == group_cpu) {
+ struct list_head *list;
+do_local:
+ list = &__get_cpu_var(blk_cpu_done);
+ list_add_tail(&req->csd.list, list);
+
+ /*
+ * if the list only contains our just added request,
+ * signal a raise of the softirq. If there are already
+ * entries there, someone already raised the irq but it
+ * hasn't run yet.
+ */
+ if (list->next == &req->csd.list)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ } else if (raise_blk_irq(ccpu, req))
+ goto do_local;
+
+ local_irq_restore(flags);
+}
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions,
+ * unless the driver actually implements this in its completion callback
+ * through requeueing. The actual completion happens out-of-order,
+ * through a softirq handler. The user must have registered a completion
+ * callback through blk_queue_softirq_done().
+ **/
+void blk_complete_request(struct request *req)
+{
+ if (unlikely(blk_should_fake_timeout(req->q)))
+ return;
+ if (!blk_mark_rq_complete(req))
+ __blk_complete_request(req);
+}
+EXPORT_SYMBOL(blk_complete_request);
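+
+/*
+ * Editorial usage sketch, not part of the original file: the intended split
+ * between hard-irq and softirq completion. The driver registers a softirq
+ * done handler at init time and calls blk_complete_request() from its
+ * interrupt handler; the error/byte accounting shown is illustrative.
+ */
+#if 0
+static void example_softirq_done(struct request *rq)
+{
+	/* runs in softirq context; do the heavier completion work here */
+	blk_end_request(rq, rq->errors ? -EIO : 0, blk_rq_bytes(rq));
+}
+
+/* at init:          blk_queue_softirq_done(q, example_softirq_done);
+ * in the hard irq:  blk_complete_request(rq);
+ */
+#endif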
+
+static __init int blk_softirq_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+ open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+ register_hotcpu_notifier(&blk_cpu_notifier);
+ return 0;
+}
+subsys_initcall(blk_softirq_init);
diff --git a/libdde-linux26/contrib/block/blk-sysfs.c b/libdde-linux26/contrib/block/blk-sysfs.c
new file mode 100644
index 00000000..e29ddfc7
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-sysfs.c
@@ -0,0 +1,426 @@
+/*
+ * Functions related to sysfs handling
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+
+#include "blk.h"
+
+struct queue_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct request_queue *, char *);
+ ssize_t (*store)(struct request_queue *, const char *, size_t);
+};
+
+static ssize_t
+queue_var_show(unsigned int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+queue_var_store(unsigned long *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtoul(p, &p, 10);
+ return count;
+}
+
+static ssize_t queue_requests_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->nr_requests, (page));
+}
+
+static ssize_t
+queue_requests_store(struct request_queue *q, const char *page, size_t count)
+{
+ struct request_list *rl = &q->rq;
+ unsigned long nr;
+ int ret = queue_var_store(&nr, page, count);
+ if (nr < BLKDEV_MIN_RQ)
+ nr = BLKDEV_MIN_RQ;
+
+ spin_lock_irq(q->queue_lock);
+ q->nr_requests = nr;
+ blk_queue_congestion_threshold(q);
+
+ if (rl->count[READ] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, READ);
+ else if (rl->count[READ] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, READ);
+
+ if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+ blk_set_queue_congested(q, WRITE);
+ else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+ blk_clear_queue_congested(q, WRITE);
+
+ if (rl->count[READ] >= q->nr_requests) {
+ blk_set_queue_full(q, READ);
+ } else if (rl->count[READ]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, READ);
+ wake_up(&rl->wait[READ]);
+ }
+
+ if (rl->count[WRITE] >= q->nr_requests) {
+ blk_set_queue_full(q, WRITE);
+ } else if (rl->count[WRITE]+1 <= q->nr_requests) {
+ blk_clear_queue_full(q, WRITE);
+ wake_up(&rl->wait[WRITE]);
+ }
+ spin_unlock_irq(q->queue_lock);
+ return ret;
+}
+
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+ int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+
+ return queue_var_show(ra_kb, (page));
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long ra_kb;
+ ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+
+ return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+ int max_sectors_kb = q->max_sectors >> 1;
+
+ return queue_var_show(max_sectors_kb, (page));
+}
+
+static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(q->hardsect_size, page);
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+ unsigned long max_sectors_kb,
+ max_hw_sectors_kb = q->max_hw_sectors >> 1,
+ page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+ ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+
+ if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+ return -EINVAL;
+
+ spin_lock_irq(q->queue_lock);
+ q->max_sectors = max_sectors_kb << 1;
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+ int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+ return queue_var_show(max_hw_sectors_kb, (page));
+}
+
+static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(!blk_queue_nonrot(q), page);
+}
+
+static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (nm)
+ queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_set(QUEUE_FLAG_NONROT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long nm;
+ ssize_t ret = queue_var_store(&nm, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (nm)
+ queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
+{
+ unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+
+ return queue_var_show(set != 0, page);
+}
+
+static ssize_t
+queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
+{
+ ssize_t ret = -EINVAL;
+#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+ unsigned long val;
+
+ ret = queue_var_store(&val, page, count);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ spin_unlock_irq(q->queue_lock);
+#endif
+ return ret;
+}
+
+static ssize_t queue_iostats_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_io_stat(q), page);
+}
+
+static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned long stats;
+ ssize_t ret = queue_var_store(&stats, page, count);
+
+ spin_lock_irq(q->queue_lock);
+ if (stats)
+ queue_flag_set(QUEUE_FLAG_IO_STAT, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return ret;
+}
+
+static struct queue_sysfs_entry queue_requests_entry = {
+ .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_requests_show,
+ .store = queue_requests_store,
+};
+
+static struct queue_sysfs_entry queue_ra_entry = {
+ .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_ra_show,
+ .store = queue_ra_store,
+};
+
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+ .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_max_sectors_show,
+ .store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+ .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+ .show = queue_max_hw_sectors_show,
+};
+
+static struct queue_sysfs_entry queue_iosched_entry = {
+ .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+ .show = elv_iosched_show,
+ .store = elv_iosched_store,
+};
+
+static struct queue_sysfs_entry queue_hw_sector_size_entry = {
+ .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
+ .show = queue_hw_sector_size_show,
+};
+
+static struct queue_sysfs_entry queue_nonrot_entry = {
+ .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nonrot_show,
+ .store = queue_nonrot_store,
+};
+
+static struct queue_sysfs_entry queue_nomerges_entry = {
+ .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_nomerges_show,
+ .store = queue_nomerges_store,
+};
+
+static struct queue_sysfs_entry queue_rq_affinity_entry = {
+ .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_rq_affinity_show,
+ .store = queue_rq_affinity_store,
+};
+
+static struct queue_sysfs_entry queue_iostats_entry = {
+ .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
+ .show = queue_iostats_show,
+ .store = queue_iostats_store,
+};
+
+static struct attribute *default_attrs[] = {
+ &queue_requests_entry.attr,
+ &queue_ra_entry.attr,
+ &queue_max_hw_sectors_entry.attr,
+ &queue_max_sectors_entry.attr,
+ &queue_iosched_entry.attr,
+ &queue_hw_sector_size_entry.attr,
+ &queue_nonrot_entry.attr,
+ &queue_nomerges_entry.attr,
+ &queue_rq_affinity_entry.attr,
+ &queue_iostats_entry.attr,
+ NULL,
+};
+
+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+
+static ssize_t
+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct queue_sysfs_entry *entry = to_queue(attr);
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+ ssize_t res;
+
+ if (!entry->show)
+ return -EIO;
+ mutex_lock(&q->sysfs_lock);
+ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+ res = entry->show(q, page);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+static ssize_t
+queue_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct queue_sysfs_entry *entry = to_queue(attr);
+ struct request_queue *q;
+ ssize_t res;
+
+ if (!entry->store)
+ return -EIO;
+
+ q = container_of(kobj, struct request_queue, kobj);
+ mutex_lock(&q->sysfs_lock);
+ if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+ mutex_unlock(&q->sysfs_lock);
+ return -ENOENT;
+ }
+ res = entry->store(q, page, length);
+ mutex_unlock(&q->sysfs_lock);
+ return res;
+}
+
+/**
+ * blk_cleanup_queue - release a &struct request_queue when it is no longer needed
+ * @kobj: the kobj belonging to the request queue to be released
+ *
+ * Description:
+ * blk_cleanup_queue is the pair to blk_init_queue() or
+ * blk_queue_make_request(). It should be called when a request queue is
+ * being released; typically when a block device is being de-registered.
+ * Currently, its primary task is to free all the &struct request
+ * structures that were allocated to the queue and the queue itself.
+ *
+ * Caveat:
+ * Hopefully the low level driver will have finished any
+ * outstanding requests first...
+ **/
+static void blk_release_queue(struct kobject *kobj)
+{
+ struct request_queue *q =
+ container_of(kobj, struct request_queue, kobj);
+ struct request_list *rl = &q->rq;
+
+ blk_sync_queue(q);
+
+ if (rl->rq_pool)
+ mempool_destroy(rl->rq_pool);
+
+ if (q->queue_tags)
+ __blk_queue_free_tags(q);
+
+ blk_trace_shutdown(q);
+
+ bdi_destroy(&q->backing_dev_info);
+ kmem_cache_free(blk_requestq_cachep, q);
+}
+
+static struct sysfs_ops queue_sysfs_ops = {
+ .show = queue_attr_show,
+ .store = queue_attr_store,
+};
+
+struct kobj_type blk_queue_ktype = {
+ .sysfs_ops = &queue_sysfs_ops,
+ .default_attrs = default_attrs,
+ .release = blk_release_queue,
+};
+
+int blk_register_queue(struct gendisk *disk)
+{
+ int ret;
+
+ struct request_queue *q = disk->queue;
+
+ if (WARN_ON(!q))
+ return -ENXIO;
+
+ if (!q->request_fn)
+ return 0;
+
+ ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
+ "%s", "queue");
+ if (ret < 0)
+ return ret;
+
+ kobject_uevent(&q->kobj, KOBJ_ADD);
+
+ ret = elv_register_queue(q);
+ if (ret) {
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ kobject_del(&q->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void blk_unregister_queue(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+
+ if (WARN_ON(!q))
+ return;
+
+ if (q->request_fn) {
+ elv_unregister_queue(q);
+
+ kobject_uevent(&q->kobj, KOBJ_REMOVE);
+ kobject_del(&q->kobj);
+ kobject_put(&disk_to_dev(disk)->kobj);
+ }
+}
diff --git a/libdde-linux26/contrib/block/blk-tag.c b/libdde-linux26/contrib/block/blk-tag.c
new file mode 100644
index 00000000..3c518e33
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-tag.c
@@ -0,0 +1,402 @@
+/*
+ * Functions related to tagged command queuing
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+
+#include "blk.h"
+
+/**
+ * blk_queue_find_tag - find a request by its tag and queue
+ * @q: The request queue for the device
+ * @tag: The tag of the request
+ *
+ * Notes:
+ * Should be used when a device returns a tag and you want to match
+ * it with a request.
+ *
+ * no locks need be held.
+ **/
+struct request *blk_queue_find_tag(struct request_queue *q, int tag)
+{
+ return blk_map_queue_find_tag(q->queue_tags, tag);
+}
+EXPORT_SYMBOL(blk_queue_find_tag);
+
+/**
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * Tries to free the specified @bqt. Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
+{
+ int retval;
+
+ retval = atomic_dec_and_test(&bqt->refcnt);
+ if (retval) {
+ BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+ bqt->max_depth);
+
+ kfree(bqt->tag_index);
+ bqt->tag_index = NULL;
+
+ kfree(bqt->tag_map);
+ bqt->tag_map = NULL;
+
+ kfree(bqt);
+ }
+
+ return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * blk_cleanup_queue() will take care of calling this function, if tagging
+ * has been used. So there's no need to call this directly.
+ **/
+void __blk_queue_free_tags(struct request_queue *q)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+
+ if (!bqt)
+ return;
+
+ __blk_free_tags(bqt);
+
+ q->queue_tags = NULL;
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+}
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * For an externally managed @bqt, this frees the map. Callers of this
+ * function must guarantee that all queues that might have been using
+ * this tag map have already been released.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+ if (unlikely(!__blk_free_tags(bqt)))
+ BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * This is used to disable tagged queuing on a device, while leaving
+ * the queue itself functional.
+ **/
+void blk_queue_free_tags(struct request_queue *q)
+{
+ queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
+}
+EXPORT_SYMBOL(blk_queue_free_tags);
+
+static int
+init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
+{
+ struct request **tag_index;
+ unsigned long *tag_map;
+ int nr_ulongs;
+
+ if (q && depth > q->nr_requests * 2) {
+ depth = q->nr_requests * 2;
+ printk(KERN_ERR "%s: adjusted depth to %d\n",
+ __func__, depth);
+ }
+
+ tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+ if (!tag_index)
+ goto fail;
+
+ nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+ tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
+ if (!tag_map)
+ goto fail;
+
+ tags->real_max_depth = depth;
+ tags->max_depth = depth;
+ tags->tag_index = tag_index;
+ tags->tag_map = tag_map;
+
+ return 0;
+fail:
+ kfree(tag_index);
+ return -ENOMEM;
+}
+
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+ int depth)
+{
+ struct blk_queue_tag *tags;
+
+ tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+ if (!tags)
+ goto fail;
+
+ if (init_tag_map(q, tags, depth))
+ goto fail;
+
+ atomic_set(&tags->refcnt, 1);
+ return tags;
+fail:
+ kfree(tags);
+ return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth: the maximum queue depth supported
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+ return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q: the request queue for the device
+ * @depth: the maximum queue depth supported
+ * @tags: the tag to use
+ *
+ * Queue lock must be held here if the function is called to resize an
+ * existing map.
+ **/
+int blk_queue_init_tags(struct request_queue *q, int depth,
+ struct blk_queue_tag *tags)
+{
+ int rc;
+
+ BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+ if (!tags && !q->queue_tags) {
+ tags = __blk_queue_init_tags(q, depth);
+
+ if (!tags)
+ goto fail;
+ } else if (q->queue_tags) {
+ rc = blk_queue_resize_tags(q, depth);
+ if (rc)
+ return rc;
+ queue_flag_set(QUEUE_FLAG_QUEUED, q);
+ return 0;
+ } else
+ atomic_inc(&tags->refcnt);
+
+ /*
+ * assign it, all done
+ */
+ q->queue_tags = tags;
+ queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
+ INIT_LIST_HEAD(&q->tag_busy_list);
+ return 0;
+fail:
+ kfree(tags);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(blk_queue_init_tags);
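+
+/*
+ * Editorial usage sketch, not part of the original file: enabling tagged
+ * queuing on a driver's queue with a private tag map. The depth of 64 is
+ * illustrative; NULL means the map is not shared with another queue.
+ */
+#if 0
+static void example_enable_tcq(struct request_queue *q)
+{
+	if (blk_queue_init_tags(q, 64, NULL))
+		printk(KERN_WARNING "example: running untagged\n");
+}
+#endif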
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q: the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ * Notes:
+ * Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(struct request_queue *q, int new_depth)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ struct request **tag_index;
+ unsigned long *tag_map;
+ int max_depth, nr_ulongs;
+
+ if (!bqt)
+ return -ENXIO;
+
+ /*
+ * If real_max_depth is already large enough, just adjust
+ * max_depth. *NOTE* as requests with tag values between
+ * new_depth and real_max_depth can be in flight, the tag
+ * map cannot be shrunk blindly here.
+ */
+ if (new_depth <= bqt->real_max_depth) {
+ bqt->max_depth = new_depth;
+ return 0;
+ }
+
+ /*
+ * Currently cannot replace a shared tag map with a new
+ * one, so error out if this is the case
+ */
+ if (atomic_read(&bqt->refcnt) != 1)
+ return -EBUSY;
+
+ /*
+ * save the old state info, so we can copy it back
+ */
+ tag_index = bqt->tag_index;
+ tag_map = bqt->tag_map;
+ max_depth = bqt->real_max_depth;
+
+ if (init_tag_map(q, bqt, new_depth))
+ return -ENOMEM;
+
+ memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+ nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
+ memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
+
+ kfree(tag_index);
+ kfree(tag_map);
+ return 0;
+}
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
+/**
+ * blk_queue_end_tag - end tag operations for a request
+ * @q: the request queue for the device
+ * @rq: the request that has completed
+ *
+ * Description:
+ * Typically called when end_that_request_first() returns %0, meaning
+ * all transfers have been done for a request. It's important to call
+ * this function before end_that_request_last(), as that will put the
+ * request back on the free list thus corrupting the internal tag list.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+void blk_queue_end_tag(struct request_queue *q, struct request *rq)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ int tag = rq->tag;
+
+ BUG_ON(tag == -1);
+
+ if (unlikely(tag >= bqt->real_max_depth))
+ /*
+ * This can happen after tag depth has been reduced.
+ * FIXME: how about a warning or info message here?
+ */
+ return;
+
+ list_del_init(&rq->queuelist);
+ rq->cmd_flags &= ~REQ_QUEUED;
+ rq->tag = -1;
+
+ if (unlikely(bqt->tag_index[tag] == NULL))
+ printk(KERN_ERR "%s: tag %d is missing\n",
+ __func__, tag);
+
+ bqt->tag_index[tag] = NULL;
+
+ if (unlikely(!test_bit(tag, bqt->tag_map))) {
+ printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
+ __func__, tag);
+ return;
+ }
+ /*
+ * The tag_map bit acts as a lock for tag_index[bit], so we need
+ * unlock memory barrier semantics.
+ */
+ clear_bit_unlock(tag, bqt->tag_map);
+}
+EXPORT_SYMBOL(blk_queue_end_tag);
+
+/**
+ * blk_queue_start_tag - find a free tag and assign it
+ * @q: the request queue for the device
+ * @rq: the block request that needs tagging
+ *
+ * Description:
+ * This can either be used as a stand-alone helper, or possibly be
+ * assigned as the queue &prep_rq_fn (in which case &struct request
+ * automagically gets a tag assigned). Note that this function
+ * assumes that any type of request can be queued! If this is not
+ * true for your device, you must check the request type before
+ * calling this function. The request will also be removed from
+ * the request queue, so it is the driver's responsibility to re-add
+ * it if it should need to be restarted for some reason.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+int blk_queue_start_tag(struct request_queue *q, struct request *rq)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+ unsigned max_depth, offset;
+ int tag;
+
+ if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+ printk(KERN_ERR
+ "%s: request %p for device [%s] already tagged %d",
+ __func__, rq,
+ rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+ BUG();
+ }
+
+ /*
+ * Protect against shared tag maps, as we may not have exclusive
+ * access to the tag map.
+ *
+ * We reserve a few tags just for sync IO, since we don't want
+ * to starve sync IO on behalf of flooding async IO.
+ */
+ max_depth = bqt->max_depth;
+ if (rq_is_sync(rq))
+ offset = 0;
+ else
+ offset = max_depth >> 2;
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+ if (tag >= max_depth)
+ return 1;
+
+ } while (test_and_set_bit_lock(tag, bqt->tag_map));
+ /*
+ * We need lock ordering semantics given by test_and_set_bit_lock.
+ * See blk_queue_end_tag for details.
+ */
+
+ rq->cmd_flags |= REQ_QUEUED;
+ rq->tag = tag;
+ bqt->tag_index[tag] = rq;
+ blkdev_dequeue_request(rq);
+ list_add(&rq->queuelist, &q->tag_busy_list);
+ return 0;
+}
+EXPORT_SYMBOL(blk_queue_start_tag);
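+
+/*
+ * Editorial usage sketch, not part of the original file: the start/end
+ * pairing as seen from a request_fn based driver. blk_queue_start_tag()
+ * already dequeues the request; the "issue to hardware" step is a
+ * placeholder for the real driver path.
+ */
+#if 0
+static void example_request_fn(struct request_queue *q)
+{
+	struct request *rq;
+
+	while ((rq = elv_next_request(q)) != NULL) {
+		if (blk_queue_start_tag(q, rq))
+			break;		/* out of tags, retry on completion */
+		/* issue rq to the hardware, remembering rq->tag */
+	}
+}
+
+/* in the completion path, with the queue lock held:
+ *	blk_queue_end_tag(q, rq);
+ */
+#endif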
+
+/**
+ * blk_queue_invalidate_tags - invalidate all pending tags
+ * @q: the request queue for the device
+ *
+ * Description:
+ * Hardware conditions may dictate a need to stop all pending requests.
+ * In this case, we will safely clear the block side of the tag queue and
+ * re-add all requests to the request queue in the right order.
+ *
+ * Notes:
+ * queue lock must be held.
+ **/
+void blk_queue_invalidate_tags(struct request_queue *q)
+{
+ struct list_head *tmp, *n;
+
+ list_for_each_safe(tmp, n, &q->tag_busy_list)
+ blk_requeue_request(q, list_entry_rq(tmp));
+}
+EXPORT_SYMBOL(blk_queue_invalidate_tags);
diff --git a/libdde-linux26/contrib/block/blk-timeout.c b/libdde-linux26/contrib/block/blk-timeout.c
new file mode 100644
index 00000000..19da232b
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk-timeout.c
@@ -0,0 +1,232 @@
+/*
+ * Functions related to generic timeout handling of requests.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/fault-inject.h>
+
+#include "blk.h"
+
+#include <ddekit/timer.h>
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+
+static DECLARE_FAULT_ATTR(fail_io_timeout);
+
+static int __init setup_fail_io_timeout(char *str)
+{
+ return setup_fault_attr(&fail_io_timeout, str);
+}
+__setup("fail_io_timeout=", setup_fail_io_timeout);
+
+int blk_should_fake_timeout(struct request_queue *q)
+{
+ if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
+ return 0;
+
+ return should_fail(&fail_io_timeout, 1);
+}
+
+static int __init fail_io_timeout_debugfs(void)
+{
+ return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout");
+}
+
+late_initcall(fail_io_timeout_debugfs);
+
+ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags);
+
+ return sprintf(buf, "%d\n", set != 0);
+}
+
+ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gendisk *disk = dev_to_disk(dev);
+ int val;
+
+ if (count) {
+ struct request_queue *q = disk->queue;
+ char *p = (char *) buf;
+
+ val = simple_strtoul(p, &p, 10);
+ spin_lock_irq(q->queue_lock);
+ if (val)
+ queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
+ else
+ queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ return count;
+}
+
+#endif /* CONFIG_FAIL_IO_TIMEOUT */
+
+/*
+ * blk_delete_timer - Delete/cancel timer for a given function.
+ * @req: request that we are canceling timer for
+ *
+ */
+void blk_delete_timer(struct request *req)
+{
+ list_del_init(&req->timeout_list);
+}
+
+static void blk_rq_timed_out(struct request *req)
+{
+ struct request_queue *q = req->q;
+ enum blk_eh_timer_return ret;
+
+ ret = q->rq_timed_out_fn(req);
+ switch (ret) {
+ case BLK_EH_HANDLED:
+ __blk_complete_request(req);
+ break;
+ case BLK_EH_RESET_TIMER:
+ blk_clear_rq_complete(req);
+ blk_add_timer(req);
+ break;
+ case BLK_EH_NOT_HANDLED:
+ /*
+ * LLD handles this for now but in the future
+ * we can send a request msg to abort the command
+ * and we can move more of the generic scsi eh code to
+ * the blk layer.
+ */
+ break;
+ default:
+ printk(KERN_ERR "block: bad eh return: %d\n", ret);
+ break;
+ }
+}
+
+void blk_rq_timed_out_timer(unsigned long data)
+{
+ struct request_queue *q = (struct request_queue *) data;
+ unsigned long flags, next = 0;
+ struct request *rq, *tmp;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
+ if (time_after_eq(jiffies, rq->deadline)) {
+ list_del_init(&rq->timeout_list);
+
+ /*
+ * Check if we raced with end io completion
+ */
+ if (blk_mark_rq_complete(rq))
+ continue;
+ blk_rq_timed_out(rq);
+ } else {
+ if (!next || time_after(next, rq->deadline))
+ next = rq->deadline;
+ }
+ }
+
+ /*
+ * next can never be 0 here with the list non-empty, since we always
+ * bump ->deadline to 1 so we can detect if the timer was ever added
+ * or not. See comment in blk_add_timer()
+ */
+ if (next)
+ mod_timer(&q->timeout, round_jiffies_up(next));
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * blk_abort_request -- Request recovery for the specified command
+ * @req: pointer to the request of interest
+ *
+ * This function requests that the block layer start recovery for the
+ * request by deleting the timer and calling the q's timeout function.
+ * LLDDs who implement their own error recovery MAY ignore the timeout
+ * event if they generated blk_abort_request(). Must hold queue lock.
+ */
+void blk_abort_request(struct request *req)
+{
+ if (blk_mark_rq_complete(req))
+ return;
+ blk_delete_timer(req);
+ blk_rq_timed_out(req);
+}
+EXPORT_SYMBOL_GPL(blk_abort_request);
+
+/**
+ * blk_add_timer - Start timeout timer for a single request
+ * @req: request that is about to start running.
+ *
+ * Notes:
+ * Each request has its own timer, and as it is added to the queue, we
+ * set up the timer. When the request completes, we cancel the timer.
+ */
+void blk_add_timer(struct request *req)
+{
+ struct request_queue *q = req->q;
+ unsigned long expiry;
+
+ if (!q->rq_timed_out_fn)
+ return;
+
+ BUG_ON(!list_empty(&req->timeout_list));
+ BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
+
+ if (req->timeout)
+ req->deadline = jiffies + req->timeout;
+ else {
+ req->deadline = jiffies + q->rq_timeout;
+ /*
+ * Some LLDs, like scsi, peek at the timeout to prevent
+ * a command from being retried forever.
+ */
+ req->timeout = q->rq_timeout;
+ }
+ list_add_tail(&req->timeout_list, &q->timeout_list);
+
+ /*
+ * If the timer isn't already pending or this timeout is earlier
+ * than an existing one, modify the timer. Round up to next nearest
+ * second.
+ */
+ expiry = round_jiffies_up(req->deadline);
+
+ if (!timer_pending(&q->timeout) ||
+ time_before(expiry, q->timeout.expires))
+ mod_timer(&q->timeout, expiry);
+}
+
+/**
+ * blk_abort_queue -- Abort all requests on the given queue
+ * @queue: pointer to queue
+ *
+ */
+void blk_abort_queue(struct request_queue *q)
+{
+ unsigned long flags;
+ struct request *rq, *tmp;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ elv_abort_queue(q);
+
+ /*
+ * Splice entries to local list, to avoid deadlocking if entries
+ * get readded to the timeout list by error handling
+ */
+ list_splice_init(&q->timeout_list, &list);
+
+ list_for_each_entry_safe(rq, tmp, &list, timeout_list)
+ blk_abort_request(rq);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_abort_queue);
diff --git a/libdde-linux26/contrib/block/blk.h b/libdde-linux26/contrib/block/blk.h
new file mode 100644
index 00000000..0dce92c3
--- /dev/null
+++ b/libdde-linux26/contrib/block/blk.h
@@ -0,0 +1,119 @@
+#ifndef BLK_INTERNAL_H
+#define BLK_INTERNAL_H
+
+/* Amount of time in which a process may batch requests */
+#define BLK_BATCH_TIME (HZ/50UL)
+
+/* Number of requests a "batching" process may submit */
+#define BLK_BATCH_REQ 32
+
+extern struct kmem_cache *blk_requestq_cachep;
+extern struct kobj_type blk_queue_ktype;
+
+void init_request_from_bio(struct request *req, struct bio *bio);
+void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
+ struct bio *bio);
+void __blk_queue_free_tags(struct request_queue *q);
+
+void blk_unplug_work(struct work_struct *work);
+void blk_unplug_timeout(unsigned long data);
+void blk_rq_timed_out_timer(unsigned long data);
+void blk_delete_timer(struct request *);
+void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
+
+/*
+ * Internal atomic flags for request handling
+ */
+enum rq_atomic_flags {
+ REQ_ATOM_COMPLETE = 0,
+};
+
+/*
+ * EH timer and IO completion will both attempt to 'grab' the request, make
+ * sure that only one of them succeeds
+ */
+static inline int blk_mark_rq_complete(struct request *rq)
+{
+ return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+static inline void blk_clear_rq_complete(struct request *rq)
+{
+ clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
+}
+
+#ifdef CONFIG_FAIL_IO_TIMEOUT
+int blk_should_fake_timeout(struct request_queue *);
+ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
+ssize_t part_timeout_store(struct device *, struct device_attribute *,
+ const char *, size_t);
+#else
+static inline int blk_should_fake_timeout(struct request_queue *q)
+{
+ return 0;
+}
+#endif
+
+struct io_context *current_io_context(gfp_t gfp_flags, int node);
+
+int ll_back_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio);
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
+ struct bio *bio);
+int attempt_back_merge(struct request_queue *q, struct request *rq);
+int attempt_front_merge(struct request_queue *q, struct request *rq);
+void blk_recalc_rq_segments(struct request *rq);
+void blk_recalc_rq_sectors(struct request *rq, int nsect);
+
+void blk_queue_congestion_threshold(struct request_queue *q);
+
+int blk_dev_init(void);
+
+/*
+ * Return the threshold (number of used requests) at which the queue is
+ * considered to be congested. It includes a little hysteresis to keep the
+ * context switch rate down.
+ */
+static inline int queue_congestion_on_threshold(struct request_queue *q)
+{
+ return q->nr_congestion_on;
+}
+
+/*
+ * The threshold at which a queue is considered to be uncongested
+ */
+static inline int queue_congestion_off_threshold(struct request_queue *q)
+{
+ return q->nr_congestion_off;
+}
+
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+#define rq_for_each_integrity_segment(bvl, _rq, _iter) \
+ __rq_for_each_bio(_iter.bio, _rq) \
+ bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i)
+
+#endif /* BLK_DEV_INTEGRITY */
+
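+/*
+ * Map a CPU to a representative CPU of its core (SCHED_MC) or SMT
+ * sibling (SCHED_SMT) group; with neither, every CPU is its own group.
+ */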
+static inline int blk_cpu_to_group(int cpu)
+{
+#ifdef CONFIG_SCHED_MC
+ const struct cpumask *mask = cpu_coregroup_mask(cpu);
+ return cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+ return first_cpu(per_cpu(cpu_sibling_map, cpu));
+#else
+ return cpu;
+#endif
+}
+
+static inline int blk_do_io_stat(struct request_queue *q)
+{
+ if (q)
+ return blk_queue_io_stat(q);
+
+ return 0;
+}
+
+#endif
diff --git a/libdde-linux26/contrib/block/cfq-iosched.c b/libdde-linux26/contrib/block/cfq-iosched.c
new file mode 100644
index 00000000..37c1fca9
--- /dev/null
+++ b/libdde-linux26/contrib/block/cfq-iosched.c
@@ -0,0 +1,2465 @@
+/*
+ * CFQ, or complete fairness queueing, disk scheduler.
+ *
+ * Based on ideas from a previously unfinished io
+ * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
+ *
+ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/rbtree.h>
+#include <linux/ioprio.h>
+#include <linux/blktrace_api.h>
+#include <ddekit/timer.h>
+
+/*
+ * tunables
+ */
+/* max queue in one round of service */
+static const int cfq_quantum = 4;
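+/* fifo expiry per queue type, indexed by the sync flag: [0] = async, [1] = sync */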
+static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
+static const int cfq_slice_sync = HZ / 10;
+static int cfq_slice_async = HZ / 25;
+static const int cfq_slice_async_rq = 2;
+static int cfq_slice_idle = HZ / 125;
+
+/*
+ * offset from end of service tree
+ */
+#define CFQ_IDLE_DELAY (HZ / 5)
+
+/*
+ * below this threshold, we consider thinktime immediate
+ */
+#define CFQ_MIN_TT (2)
+
+#define CFQ_SLICE_SCALE (5)
+#define CFQ_HW_QUEUE_MIN (5)
+
+#define RQ_CIC(rq) \
+ ((struct cfq_io_context *) (rq)->elevator_private)
+#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
+
+static struct kmem_cache *cfq_pool;
+static struct kmem_cache *cfq_ioc_pool;
+
+static DEFINE_PER_CPU(unsigned long, ioc_count);
+static struct completion *ioc_gone;
+static DEFINE_SPINLOCK(ioc_gone_lock);
+
+#define CFQ_PRIO_LISTS IOPRIO_BE_NR
+#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
+#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
+
+#define ASYNC (0)
+#define SYNC (1)
+
+#define sample_valid(samples) ((samples) > 80)
+
+/*
+ * Most of our rbtree usage is for sorting with min extraction, so
+ * if we cache the leftmost node we don't have to walk down the tree
+ * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
+ * move this into the elevator for the rq sorting as well.
+ */
+struct cfq_rb_root {
+ struct rb_root rb;
+ struct rb_node *left;
+};
+#define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
+
+/*
+ * Per block device queue structure
+ */
+struct cfq_data {
+ struct request_queue *queue;
+
+ /*
+ * rr list of queues with requests and the count of them
+ */
+ struct cfq_rb_root service_tree;
+ unsigned int busy_queues;
+ /*
+ * Used to track any pending rt requests so we can pre-empt current
+ * non-RT cfqq in service when this value is non-zero.
+ */
+ unsigned int busy_rt_queues;
+
+ int rq_in_driver;
+ int sync_flight;
+
+ /*
+ * queue-depth detection
+ */
+ int rq_queued;
+ int hw_tag;
+ int hw_tag_samples;
+ int rq_in_driver_peak;
+
+ /*
+ * idle window management
+ */
+ struct timer_list idle_slice_timer;
+ struct work_struct unplug_work;
+
+ struct cfq_queue *active_queue;
+ struct cfq_io_context *active_cic;
+
+ /*
+ * async queue for each priority case
+ */
+ struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+ struct cfq_queue *async_idle_cfqq;
+
+ sector_t last_position;
+ unsigned long last_end_request;
+
+ /*
+ * tunables, see top of file
+ */
+ unsigned int cfq_quantum;
+ unsigned int cfq_fifo_expire[2];
+ unsigned int cfq_back_penalty;
+ unsigned int cfq_back_max;
+ unsigned int cfq_slice[2];
+ unsigned int cfq_slice_async_rq;
+ unsigned int cfq_slice_idle;
+
+ struct list_head cic_list;
+};
+
+/*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+ /* reference count */
+ atomic_t ref;
+ /* various state flags, see below */
+ unsigned int flags;
+ /* parent cfq_data */
+ struct cfq_data *cfqd;
+ /* service_tree member */
+ struct rb_node rb_node;
+ /* service_tree key */
+ unsigned long rb_key;
+ /* sorted list of pending requests */
+ struct rb_root sort_list;
+ /* if fifo isn't expired, next request to serve */
+ struct request *next_rq;
+ /* requests queued in sort_list */
+ int queued[2];
+ /* currently allocated requests */
+ int allocated[2];
+ /* fifo list of requests in sort_list */
+ struct list_head fifo;
+
+ unsigned long slice_end;
+ long slice_resid;
+
+ /* pending metadata requests */
+ int meta_pending;
+ /* number of requests that are on the dispatch list or inside driver */
+ int dispatched;
+
+ /* io prio of this group */
+ unsigned short ioprio, org_ioprio;
+ unsigned short ioprio_class, org_ioprio_class;
+
+ pid_t pid;
+};
+
+enum cfqq_state_flags {
+ CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
+ CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
+ CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
+ CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
+ CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */
+ CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
+ CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
+ CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
+ CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */
+ CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
+ CFQ_CFQQ_FLAG_sync, /* synchronous queue */
+};
+
+#define CFQ_CFQQ_FNS(name) \
+static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
+{ \
+ (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
+} \
+static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
+{ \
+ return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
+}
+
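+/*
+ * Each CFQ_CFQQ_FNS() invocation below expands to cfq_mark_cfqq_<name>(),
+ * cfq_clear_cfqq_<name>() and cfq_cfqq_<name>() helpers operating on the
+ * matching CFQ_CFQQ_FLAG_<name> bit in cfqq->flags.
+ */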
+CFQ_CFQQ_FNS(on_rr);
+CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_alloc);
+CFQ_CFQQ_FNS(must_alloc_slice);
+CFQ_CFQQ_FNS(must_dispatch);
+CFQ_CFQQ_FNS(fifo_expire);
+CFQ_CFQQ_FNS(idle_window);
+CFQ_CFQQ_FNS(prio_changed);
+CFQ_CFQQ_FNS(queue_new);
+CFQ_CFQQ_FNS(slice_new);
+CFQ_CFQQ_FNS(sync);
+#undef CFQ_CFQQ_FNS
+
+#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
+#define cfq_log(cfqd, fmt, args...) \
+ blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
+
+static void cfq_dispatch_insert(struct request_queue *, struct request *);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+ struct io_context *, gfp_t);
+static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
+ struct io_context *);
+
+static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
+ int is_sync)
+{
+ return cic->cfqq[!!is_sync];
+}
+
+static inline void cic_set_cfqq(struct cfq_io_context *cic,
+ struct cfq_queue *cfqq, int is_sync)
+{
+ cic->cfqq[!!is_sync] = cfqq;
+}
+
+/*
+ * We regard a request as SYNC if it's either a read or has the SYNC bit
+ * set (in which case it could also be direct WRITE).
+ */
+static inline int cfq_bio_sync(struct bio *bio)
+{
+ if (bio_data_dir(bio) == READ || bio_sync(bio))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * scheduler run of queue, if there are requests pending and no one in the
+ * driver that will restart queueing
+ */
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
+{
+ if (cfqd->busy_queues) {
+ cfq_log(cfqd, "schedule dispatch");
+ kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
+ }
+}
+
+static int cfq_queue_empty(struct request_queue *q)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ return !cfqd->busy_queues;
+}
+
+/*
+ * Scale schedule slice based on io priority. Use the sync time slice only
+ * if a queue is marked sync and has sync io queued. A sync queue with async
+ * io only should not get the full sync slice length.
+ */
+static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+ unsigned short prio)
+{
+ const int base_slice = cfqd->cfq_slice[sync];
+
+ WARN_ON(prio >= IOPRIO_BE_NR);
+
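+	/*
+	 * Each priority step adds or subtracts base_slice/CFQ_SLICE_SCALE
+	 * jiffies around the prio-4 midpoint. E.g. assuming HZ=1000 (so a
+	 * sync base_slice of 100): prio 0 gets 100 + 20*4 = 180 jiffies,
+	 * prio 7 gets 100 + 20*(-3) = 40 jiffies.
+	 */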
+ return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
+}
+
+static inline int
+cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
+}
+
+static inline void
+cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
+ cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
+}
+
+/*
+ * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
+ * isn't valid until the first request from the dispatch is activated
+ * and the slice time set.
+ */
+static inline int cfq_slice_used(struct cfq_queue *cfqq)
+{
+ if (cfq_cfqq_slice_new(cfqq))
+ return 0;
+ if (time_before(jiffies, cfqq->slice_end))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Lifted from AS - choose which of rq1 and rq2 that is best served now.
+ * We choose the request that is closest to the head right now. Distance
+ * behind the head is penalized and only allowed to a certain extent.
+ */
+static struct request *
+cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
+{
+ sector_t last, s1, s2, d1 = 0, d2 = 0;
+ unsigned long back_max;
+#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
+#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
+
+ if (rq1 == NULL || rq1 == rq2)
+ return rq2;
+ if (rq2 == NULL)
+ return rq1;
+
+ if (rq_is_sync(rq1) && !rq_is_sync(rq2))
+ return rq1;
+ else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
+ return rq2;
+ if (rq_is_meta(rq1) && !rq_is_meta(rq2))
+ return rq1;
+ else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
+ return rq2;
+
+ s1 = rq1->sector;
+ s2 = rq2->sector;
+
+ last = cfqd->last_position;
+
+ /*
+ * by definition, 1KiB is 2 sectors
+ */
+ back_max = cfqd->cfq_back_max * 2;
+
+ /*
+ * Strict one way elevator _except_ in the case where we allow
+ * short backward seeks which are biased as twice the cost of a
+ * similar forward seek.
+ */
+ if (s1 >= last)
+ d1 = s1 - last;
+ else if (s1 + back_max >= last)
+ d1 = (last - s1) * cfqd->cfq_back_penalty;
+ else
+ wrap |= CFQ_RQ1_WRAP;
+
+ if (s2 >= last)
+ d2 = s2 - last;
+ else if (s2 + back_max >= last)
+ d2 = (last - s2) * cfqd->cfq_back_penalty;
+ else
+ wrap |= CFQ_RQ2_WRAP;
+
+ /* Found required data */
+
+ /*
+ * By doing switch() on the bit mask "wrap" we avoid having to
+ * check two variables for all permutations: --> faster!
+ */
+ switch (wrap) {
+ case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
+ if (d1 < d2)
+ return rq1;
+ else if (d2 < d1)
+ return rq2;
+ else {
+ if (s1 >= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+
+ case CFQ_RQ2_WRAP:
+ return rq1;
+ case CFQ_RQ1_WRAP:
+ return rq2;
+ case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
+ default:
+ /*
+ * Since both rqs are wrapped,
+ * start with the one that's further behind head
+ * (--> only *one* back seek required),
+ * since back seek takes more time than forward.
+ */
+ if (s1 <= s2)
+ return rq1;
+ else
+ return rq2;
+ }
+}
+
+/*
+ * The below is leftmost cache rbtree addon
+ */
+static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
+{
+ if (!root->left)
+ root->left = rb_first(&root->rb);
+
+ if (root->left)
+ return rb_entry(root->left, struct cfq_queue, rb_node);
+
+ return NULL;
+}
+
+static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
+{
+ if (root->left == n)
+ root->left = NULL;
+
+ rb_erase(n, &root->rb);
+ RB_CLEAR_NODE(n);
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct request *
+cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *last)
+{
+ struct rb_node *rbnext = rb_next(&last->rb_node);
+ struct rb_node *rbprev = rb_prev(&last->rb_node);
+ struct request *next = NULL, *prev = NULL;
+
+ BUG_ON(RB_EMPTY_NODE(&last->rb_node));
+
+ if (rbprev)
+ prev = rb_entry_rq(rbprev);
+
+ if (rbnext)
+ next = rb_entry_rq(rbnext);
+ else {
+ rbnext = rb_first(&cfqq->sort_list);
+ if (rbnext && rbnext != &last->rb_node)
+ next = rb_entry_rq(rbnext);
+ }
+
+ return cfq_choose_req(cfqd, next, prev);
+}
+
+static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ /*
+ * just an approximation, should be ok.
+ */
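+	/*
+	 * Higher priority (and sync) queues get a larger slice and thus a
+	 * smaller offset, placing them earlier in the service tree.
+	 */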
+ return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
+ cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
+}
+
+/*
+ * The cfqd->service_tree holds all pending cfq_queue's that have
+ * requests waiting to be processed. It is sorted in the order that
+ * we will service the queues.
+ */
+static void cfq_service_tree_add(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq, int add_front)
+{
+ struct rb_node **p, *parent;
+ struct cfq_queue *__cfqq;
+ unsigned long rb_key;
+ int left;
+
+ if (cfq_class_idle(cfqq)) {
+ rb_key = CFQ_IDLE_DELAY;
+ parent = rb_last(&cfqd->service_tree.rb);
+ if (parent && parent != &cfqq->rb_node) {
+ __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
+ rb_key += __cfqq->rb_key;
+ } else
+ rb_key += jiffies;
+ } else if (!add_front) {
+ rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
+ rb_key += cfqq->slice_resid;
+ cfqq->slice_resid = 0;
+ } else
+ rb_key = 0;
+
+ if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
+ /*
+ * same position, nothing more to do
+ */
+ if (rb_key == cfqq->rb_key)
+ return;
+
+ cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+ }
+
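+	/*
+	 * Walk down the rbtree: RT class sorts before BE, BE before IDLE, and
+	 * within a class entries sort by ascending rb_key. Remember whether we
+	 * only ever descended left so the cached leftmost node can be updated.
+	 */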
+ left = 1;
+ parent = NULL;
+ p = &cfqd->service_tree.rb.rb_node;
+ while (*p) {
+ struct rb_node **n;
+
+ parent = *p;
+ __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
+
+ /*
+		 * sort RT queues first, we always want to give
+		 * preference to them. IDLE queues go to the back.
+		 * After that, sort on the next service time.
+ */
+ if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
+ n = &(*p)->rb_left;
+ else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
+ n = &(*p)->rb_right;
+ else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
+ n = &(*p)->rb_left;
+ else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
+ n = &(*p)->rb_right;
+ else if (rb_key < __cfqq->rb_key)
+ n = &(*p)->rb_left;
+ else
+ n = &(*p)->rb_right;
+
+ if (n == &(*p)->rb_right)
+ left = 0;
+
+ p = n;
+ }
+
+ if (left)
+ cfqd->service_tree.left = &cfqq->rb_node;
+
+ cfqq->rb_key = rb_key;
+ rb_link_node(&cfqq->rb_node, parent, p);
+ rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
+}
+
+/*
+ * Update cfqq's position in the service tree.
+ */
+static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ /*
+ * Resorting requires the cfqq to be on the RR list already.
+ */
+ if (cfq_cfqq_on_rr(cfqq))
+ cfq_service_tree_add(cfqd, cfqq, 0);
+}
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to last request service
+ */
+static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+ cfq_mark_cfqq_on_rr(cfqq);
+ cfqd->busy_queues++;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues++;
+
+ cfq_resort_rr_list(cfqd, cfqq);
+}
+
+/*
+ * Called when the cfqq no longer has requests pending, remove it from
+ * the service tree.
+ */
+static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+ cfq_clear_cfqq_on_rr(cfqq);
+
+ if (!RB_EMPTY_NODE(&cfqq->rb_node))
+ cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
+
+ BUG_ON(!cfqd->busy_queues);
+ cfqd->busy_queues--;
+ if (cfq_class_rt(cfqq))
+ cfqd->busy_rt_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static void cfq_del_rq_rb(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = rq_is_sync(rq);
+
+ BUG_ON(!cfqq->queued[sync]);
+ cfqq->queued[sync]--;
+
+ elv_rb_del(&cfqq->sort_list, rq);
+
+ if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
+ cfq_del_cfqq_rr(cfqd, cfqq);
+}
+
+static void cfq_add_rq_rb(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct request *__alias;
+
+ cfqq->queued[rq_is_sync(rq)]++;
+
+ /*
+ * looks a little odd, but the first insert might return an alias.
+ * if that happens, put the alias on the dispatch list
+ */
+ while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
+ cfq_dispatch_insert(cfqd->queue, __alias);
+
+ if (!cfq_cfqq_on_rr(cfqq))
+ cfq_add_cfqq_rr(cfqd, cfqq);
+
+ /*
+ * check if this request is a better next-serve candidate
+ */
+ cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
+ BUG_ON(!cfqq->next_rq);
+}
+
+static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
+{
+ elv_rb_del(&cfqq->sort_list, rq);
+ cfqq->queued[rq_is_sync(rq)]--;
+ cfq_add_rq_rb(rq);
+}
+
+static struct request *
+cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
+{
+ struct task_struct *tsk = current;
+ struct cfq_io_context *cic;
+ struct cfq_queue *cfqq;
+
+ cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ if (!cic)
+ return NULL;
+
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ if (cfqq) {
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+
+ return elv_rb_find(&cfqq->sort_list, sector);
+ }
+
+ return NULL;
+}
+
+static void cfq_activate_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ cfqd->rq_in_driver++;
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
+ cfqd->rq_in_driver);
+
+ cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
+}
+
+static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+
+ WARN_ON(!cfqd->rq_in_driver);
+ cfqd->rq_in_driver--;
+ cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
+ cfqd->rq_in_driver);
+}
+
+static void cfq_remove_request(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ if (cfqq->next_rq == rq)
+ cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
+
+ list_del_init(&rq->queuelist);
+ cfq_del_rq_rb(rq);
+
+ cfqq->cfqd->rq_queued--;
+ if (rq_is_meta(rq)) {
+ WARN_ON(!cfqq->meta_pending);
+ cfqq->meta_pending--;
+ }
+}
+
+static int cfq_merge(struct request_queue *q, struct request **req,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct request *__rq;
+
+ __rq = cfq_find_rq_fmerge(cfqd, bio);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+ return ELEVATOR_NO_MERGE;
+}
+
+static void cfq_merged_request(struct request_queue *q, struct request *req,
+ int type)
+{
+ if (type == ELEVATOR_FRONT_MERGE) {
+ struct cfq_queue *cfqq = RQ_CFQQ(req);
+
+ cfq_reposition_rq_rb(cfqq, req);
+ }
+}
+
+static void
+cfq_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * reposition in fifo if next is older than rq
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
+ time_before(next->start_time, rq->start_time))
+ list_move(&rq->queuelist, &next->queuelist);
+
+ cfq_remove_request(next);
+}
+
+static int cfq_allow_merge(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_io_context *cic;
+ struct cfq_queue *cfqq;
+
+ /*
+ * Disallow merge of a sync bio into an async request.
+ */
+ if (cfq_bio_sync(bio) && !rq_is_sync(rq))
+ return 0;
+
+ /*
+ * Lookup the cfqq that this bio will be queued with. Allow
+ * merge only if rq is queued there.
+ */
+ cic = cfq_cic_lookup(cfqd, current->io_context);
+ if (!cic)
+ return 0;
+
+ cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
+ if (cfqq == RQ_CFQQ(rq))
+ return 1;
+
+ return 0;
+}
+
+static void __cfq_set_active_queue(struct cfq_data *cfqd,
+ struct cfq_queue *cfqq)
+{
+ if (cfqq) {
+ cfq_log_cfqq(cfqd, cfqq, "set_active");
+ cfqq->slice_end = 0;
+ cfq_clear_cfqq_must_alloc_slice(cfqq);
+ cfq_clear_cfqq_fifo_expire(cfqq);
+ cfq_mark_cfqq_slice_new(cfqq);
+ cfq_clear_cfqq_queue_new(cfqq);
+ }
+
+ cfqd->active_queue = cfqq;
+}
+
+/*
+ * current cfqq expired its slice (or was too idle), select new one
+ */
+static void
+__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ int timed_out)
+{
+ cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
+
+ if (cfq_cfqq_wait_request(cfqq))
+ del_timer(&cfqd->idle_slice_timer);
+
+ cfq_clear_cfqq_must_dispatch(cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+
+ /*
+ * store what was left of this slice, if the queue idled/timed out
+ */
+ if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
+ cfqq->slice_resid = cfqq->slice_end - jiffies;
+ cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
+ }
+
+ cfq_resort_rr_list(cfqd, cfqq);
+
+ if (cfqq == cfqd->active_queue)
+ cfqd->active_queue = NULL;
+
+ if (cfqd->active_cic) {
+ put_io_context(cfqd->active_cic->ioc);
+ cfqd->active_cic = NULL;
+ }
+}
+
+static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqq)
+ __cfq_slice_expired(cfqd, cfqq, timed_out);
+}
+
+/*
+ * Get next queue for service. Unless we have a queue preemption,
+ * we'll simply select the first cfqq in the service tree.
+ */
+static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
+{
+ if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
+ return NULL;
+
+ return cfq_rb_first(&cfqd->service_tree);
+}
+
+/*
+ * Get and set a new active queue for service.
+ */
+static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq;
+
+ cfqq = cfq_get_next_queue(cfqd);
+ __cfq_set_active_queue(cfqd, cfqq);
+ return cfqq;
+}
+
+static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
+ struct request *rq)
+{
+ if (rq->sector >= cfqd->last_position)
+ return rq->sector - cfqd->last_position;
+ else
+ return cfqd->last_position - rq->sector;
+}
+
+static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
+{
+ struct cfq_io_context *cic = cfqd->active_cic;
+
+ if (!sample_valid(cic->seek_samples))
+ return 0;
+
+ return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
+}
+
+static int cfq_close_cooperator(struct cfq_data *cfq_data,
+ struct cfq_queue *cfqq)
+{
+ /*
+ * We should notice if some of the queues are cooperating, eg
+ * working closely on the same area of the disk. In that case,
+ * we can group them together and don't waste time idling.
+ */
+ return 0;
+}
+
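+/* a context counts as seeky once its mean seek distance exceeds 8192 sectors (4 MiB) */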
+#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
+
+static void cfq_arm_slice_timer(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+ struct cfq_io_context *cic;
+ unsigned long sl;
+
+ /*
+ * SSD device without seek penalty, disable idling. But only do so
+ * for devices that support queuing, otherwise we still have a problem
+ * with sync vs async workloads.
+ */
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ return;
+
+ WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
+ WARN_ON(cfq_cfqq_slice_new(cfqq));
+
+ /*
+ * idle is disabled, either manually or by past process history
+ */
+ if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
+ return;
+
+ /*
+ * still requests with the driver, don't idle
+ */
+ if (cfqd->rq_in_driver)
+ return;
+
+ /*
+ * task has exited, don't wait
+ */
+ cic = cfqd->active_cic;
+ if (!cic || !atomic_read(&cic->ioc->nr_tasks))
+ return;
+
+ /*
+ * See if this prio level has a good candidate
+ */
+ if (cfq_close_cooperator(cfqd, cfqq) &&
+ (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
+ return;
+
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ cfq_mark_cfqq_wait_request(cfqq);
+
+ /*
+ * we don't want to idle for seeks, but we do want to allow
+ * fair distribution of slice time for a process doing back-to-back
+	 * seeks. So allow a little bit of time for it to submit a new rq
+ */
+ sl = cfqd->cfq_slice_idle;
+ if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+ sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
+
+ mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
+ cfq_log(cfqd, "arm_idle: %lu", sl);
+}
+
+/*
+ * Move request from internal lists to the request queue dispatch list.
+ */
+static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
+
+ cfq_remove_request(rq);
+ cfqq->dispatched++;
+ elv_dispatch_sort(q, rq);
+
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->sync_flight++;
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+ struct request *rq;
+ int fifo;
+
+ if (cfq_cfqq_fifo_expire(cfqq))
+ return NULL;
+
+ cfq_mark_cfqq_fifo_expire(cfqq);
+
+ if (list_empty(&cfqq->fifo))
+ return NULL;
+
+ fifo = cfq_cfqq_sync(cfqq);
+ rq = rq_entry_fifo(cfqq->fifo.next);
+
+ if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+ rq = NULL;
+
+ cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+ return rq;
+}
+
+static inline int
+cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ const int base_rq = cfqd->cfq_slice_async_rq;
+
+ WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
+
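+	/*
+	 * Scale the per-slice async dispatch budget by priority: with the
+	 * default cfq_slice_async_rq of 2, prio 0 may dispatch up to
+	 * 2 * (2 + 2*7) = 32 requests, while prio 7 gets only 2 * 2 = 4.
+	 */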
+ return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
+}
+
+/*
+ * Select a queue for service. If we have a current active queue,
+ * check whether to continue servicing it, or retrieve and set a new one.
+ */
+static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq;
+
+ cfqq = cfqd->active_queue;
+ if (!cfqq)
+ goto new_queue;
+
+ /*
+ * The active queue has run out of time, expire it and select new.
+ */
+ if (cfq_slice_used(cfqq))
+ goto expire;
+
+ /*
+ * If we have a RT cfqq waiting, then we pre-empt the current non-rt
+ * cfqq.
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
+ /*
+ * We simulate this as cfqq timed out so that it gets to bank
+ * the remaining of its time slice.
+ */
+ cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
+ goto new_queue;
+ }
+
+ /*
+ * The active queue has requests and isn't expired, allow it to
+ * dispatch.
+ */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ goto keep_queue;
+
+ /*
+ * No requests pending. If the active queue still has requests in
+ * flight or is idling for a new request, allow either of these
+ * conditions to happen (or time out) before selecting a new queue.
+ */
+ if (timer_pending(&cfqd->idle_slice_timer) ||
+ (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
+ cfqq = NULL;
+ goto keep_queue;
+ }
+
+expire:
+ cfq_slice_expired(cfqd, 0);
+new_queue:
+ cfqq = cfq_set_active_queue(cfqd);
+keep_queue:
+ return cfqq;
+}
+
+/*
+ * Dispatch some requests from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static int
+__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ int max_dispatch)
+{
+ int dispatched = 0;
+
+ BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+ do {
+ struct request *rq;
+
+ /*
+ * follow expired path, else get first next available
+ */
+ rq = cfq_check_fifo(cfqq);
+ if (rq == NULL)
+ rq = cfqq->next_rq;
+
+ /*
+ * finally, insert request into driver dispatch list
+ */
+ cfq_dispatch_insert(cfqd->queue, rq);
+
+ dispatched++;
+
+ if (!cfqd->active_cic) {
+ atomic_inc(&RQ_CIC(rq)->ioc->refcount);
+ cfqd->active_cic = RQ_CIC(rq);
+ }
+
+ if (RB_EMPTY_ROOT(&cfqq->sort_list))
+ break;
+
+ /*
+ * If there is a non-empty RT cfqq waiting for current
+ * cfqq's timeslice to complete, pre-empt this cfqq
+ */
+ if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
+ break;
+
+ } while (dispatched < max_dispatch);
+
+ /*
+	 * expire an async queue immediately if it has used up its slice. Idle
+	 * queues always expire after 1 dispatch round.
+ */
+ if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+ dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+ cfq_class_idle(cfqq))) {
+ cfqq->slice_end = jiffies + 1;
+ cfq_slice_expired(cfqd, 0);
+ }
+
+ return dispatched;
+}
+
+static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
+{
+ int dispatched = 0;
+
+ while (cfqq->next_rq) {
+ cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
+ dispatched++;
+ }
+
+ BUG_ON(!list_empty(&cfqq->fifo));
+ return dispatched;
+}
+
+/*
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
+ */
+static int cfq_forced_dispatch(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq;
+ int dispatched = 0;
+
+ while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
+ dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+
+ cfq_slice_expired(cfqd, 0);
+
+ BUG_ON(cfqd->busy_queues);
+
+ cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
+ return dispatched;
+}
+
+static int cfq_dispatch_requests(struct request_queue *q, int force)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq;
+ int dispatched;
+
+ if (!cfqd->busy_queues)
+ return 0;
+
+ if (unlikely(force))
+ return cfq_forced_dispatch(cfqd);
+
+ dispatched = 0;
+ while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
+ int max_dispatch;
+
+ max_dispatch = cfqd->cfq_quantum;
+ if (cfq_class_idle(cfqq))
+ max_dispatch = 1;
+
+ if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
+ break;
+
+ if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+ break;
+
+ cfq_clear_cfqq_must_dispatch(cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+ del_timer(&cfqd->idle_slice_timer);
+
+ dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+ }
+
+ cfq_log(cfqd, "dispatched=%d", dispatched);
+ return dispatched;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each rq
+ * in-flight on this queue also holds a reference, dropped when rq is freed.
+ *
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+ struct cfq_data *cfqd = cfqq->cfqd;
+
+ BUG_ON(atomic_read(&cfqq->ref) <= 0);
+
+ if (!atomic_dec_and_test(&cfqq->ref))
+ return;
+
+ cfq_log_cfqq(cfqd, cfqq, "put_queue");
+ BUG_ON(rb_first(&cfqq->sort_list));
+ BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
+ BUG_ON(cfq_cfqq_on_rr(cfqq));
+
+ if (unlikely(cfqd->active_queue == cfqq)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ kmem_cache_free(cfq_pool, cfqq);
+}
+
+/*
+ * Must always be called with the rcu_read_lock() held
+ */
+static void
+__call_for_each_cic(struct io_context *ioc,
+ void (*func)(struct io_context *, struct cfq_io_context *))
+{
+ struct cfq_io_context *cic;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+ func(ioc, cic);
+}
+
+/*
+ * Call func for each cic attached to this ioc.
+ */
+static void
+call_for_each_cic(struct io_context *ioc,
+ void (*func)(struct io_context *, struct cfq_io_context *))
+{
+ rcu_read_lock();
+ __call_for_each_cic(ioc, func);
+ rcu_read_unlock();
+}
+
+static void cfq_cic_free_rcu(struct rcu_head *head)
+{
+ struct cfq_io_context *cic;
+
+ cic = container_of(head, struct cfq_io_context, rcu_head);
+
+ kmem_cache_free(cfq_ioc_pool, cic);
+ elv_ioc_count_dec(ioc_count);
+
+ if (ioc_gone) {
+ /*
+ * CFQ scheduler is exiting, grab exit lock and check
+ * the pending io context count. If it hits zero,
+ * complete ioc_gone and set it back to NULL
+ */
+ spin_lock(&ioc_gone_lock);
+ if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+ complete(ioc_gone);
+ ioc_gone = NULL;
+ }
+ spin_unlock(&ioc_gone_lock);
+ }
+}
+
+static void cfq_cic_free(struct cfq_io_context *cic)
+{
+ call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
+}
+
+static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
+{
+ unsigned long flags;
+
+ BUG_ON(!cic->dead_key);
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ radix_tree_delete(&ioc->radix_root, cic->dead_key);
+ hlist_del_rcu(&cic->cic_list);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ cfq_cic_free(cic);
+}
+
+/*
+ * Must be called with rcu_read_lock() held or preemption otherwise disabled.
+ * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
+ * and ->trim() which is called with the task lock held
+ */
+static void cfq_free_io_context(struct io_context *ioc)
+{
+ /*
+ * ioc->refcount is zero here, or we are called from elv_unregister(),
+ * so no more cic's are allowed to be linked into this ioc. So it
+ * should be ok to iterate over the known list, we will see all cic's
+ * since no new ones are added.
+ */
+ __call_for_each_cic(ioc, cic_free_func);
+}
+
+static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ if (unlikely(cfqq == cfqd->active_queue)) {
+ __cfq_slice_expired(cfqd, cfqq, 0);
+ cfq_schedule_dispatch(cfqd);
+ }
+
+ cfq_put_queue(cfqq);
+}
+
+static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
+ struct cfq_io_context *cic)
+{
+ struct io_context *ioc = cic->ioc;
+
+ list_del_init(&cic->queue_list);
+
+ /*
+ * Make sure key == NULL is seen for dead queues
+ */
+ smp_wmb();
+ cic->dead_key = (unsigned long) cic->key;
+ cic->key = NULL;
+
+ if (ioc->ioc_data == cic)
+ rcu_assign_pointer(ioc->ioc_data, NULL);
+
+ if (cic->cfqq[ASYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
+ cic->cfqq[ASYNC] = NULL;
+ }
+
+ if (cic->cfqq[SYNC]) {
+ cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
+ cic->cfqq[SYNC] = NULL;
+ }
+}
+
+static void cfq_exit_single_io_context(struct io_context *ioc,
+ struct cfq_io_context *cic)
+{
+ struct cfq_data *cfqd = cic->key;
+
+ if (cfqd) {
+ struct request_queue *q = cfqd->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ /*
+ * Ensure we get a fresh copy of the ->key to prevent
+ * race between exiting task and queue
+ */
+ smp_read_barrier_depends();
+ if (cic->key)
+ __cfq_exit_single_io_context(cfqd, cic);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+/*
+ * The process that ioc belongs to has exited, we need to clean up
+ * and put the internal structures we have that belong to that process.
+ */
+static void cfq_exit_io_context(struct io_context *ioc)
+{
+ call_for_each_cic(ioc, cfq_exit_single_io_context);
+}
+
+static struct cfq_io_context *
+cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+{
+ struct cfq_io_context *cic;
+
+ cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
+ if (cic) {
+ cic->last_end_request = jiffies;
+ INIT_LIST_HEAD(&cic->queue_list);
+ INIT_HLIST_NODE(&cic->cic_list);
+ cic->dtor = cfq_free_io_context;
+ cic->exit = cfq_exit_io_context;
+ elv_ioc_count_inc(ioc_count);
+ }
+
+ return cic;
+}
+
+static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
+{
+ struct task_struct *tsk = current;
+ int ioprio_class;
+
+ if (!cfq_cfqq_prio_changed(cfqq))
+ return;
+
+ ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
+ switch (ioprio_class) {
+ default:
+ printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+ case IOPRIO_CLASS_NONE:
+ /*
+ * no prio set, inherit CPU scheduling settings
+ */
+ cfqq->ioprio = task_nice_ioprio(tsk);
+ cfqq->ioprio_class = task_nice_ioclass(tsk);
+ break;
+ case IOPRIO_CLASS_RT:
+ cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio_class = IOPRIO_CLASS_RT;
+ break;
+ case IOPRIO_CLASS_BE:
+ cfqq->ioprio = task_ioprio(ioc);
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ break;
+ case IOPRIO_CLASS_IDLE:
+ cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+ cfqq->ioprio = 7;
+ cfq_clear_cfqq_idle_window(cfqq);
+ break;
+ }
+
+ /*
+ * keep track of original prio settings in case we have to temporarily
+ * elevate the priority of this queue
+ */
+ cfqq->org_ioprio = cfqq->ioprio;
+ cfqq->org_ioprio_class = cfqq->ioprio_class;
+ cfq_clear_cfqq_prio_changed(cfqq);
+}
+
+static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
+{
+ struct cfq_data *cfqd = cic->key;
+ struct cfq_queue *cfqq;
+ unsigned long flags;
+
+ if (unlikely(!cfqd))
+ return;
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+ cfqq = cic->cfqq[ASYNC];
+ if (cfqq) {
+ struct cfq_queue *new_cfqq;
+ new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
+ if (new_cfqq) {
+ cic->cfqq[ASYNC] = new_cfqq;
+ cfq_put_queue(cfqq);
+ }
+ }
+
+ cfqq = cic->cfqq[SYNC];
+ if (cfqq)
+ cfq_mark_cfqq_prio_changed(cfqq);
+
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_ioc_set_ioprio(struct io_context *ioc)
+{
+ call_for_each_cic(ioc, changed_ioprio);
+ ioc->ioprio_changed = 0;
+}
+
+static struct cfq_queue *
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+ struct io_context *ioc, gfp_t gfp_mask)
+{
+ struct cfq_queue *cfqq, *new_cfqq = NULL;
+ struct cfq_io_context *cic;
+
+retry:
+ cic = cfq_cic_lookup(cfqd, ioc);
+ /* cic always exists here */
+ cfqq = cic_to_cfqq(cic, is_sync);
+
+ if (!cfqq) {
+ if (new_cfqq) {
+ cfqq = new_cfqq;
+ new_cfqq = NULL;
+ } else if (gfp_mask & __GFP_WAIT) {
+ /*
+ * Inform the allocator of the fact that we will
+ * just repeat this allocation if it fails, to allow
+ * the allocator to do whatever it needs to attempt to
+ * free memory.
+ */
+ spin_unlock_irq(cfqd->queue->queue_lock);
+ new_cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+ cfqd->queue->node);
+ spin_lock_irq(cfqd->queue->queue_lock);
+ goto retry;
+ } else {
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ gfp_mask | __GFP_ZERO,
+ cfqd->queue->node);
+ if (!cfqq)
+ goto out;
+ }
+
+ RB_CLEAR_NODE(&cfqq->rb_node);
+ INIT_LIST_HEAD(&cfqq->fifo);
+
+ atomic_set(&cfqq->ref, 0);
+ cfqq->cfqd = cfqd;
+
+ cfq_mark_cfqq_prio_changed(cfqq);
+ cfq_mark_cfqq_queue_new(cfqq);
+
+ cfq_init_prio_data(cfqq, ioc);
+
+ if (is_sync) {
+ if (!cfq_class_idle(cfqq))
+ cfq_mark_cfqq_idle_window(cfqq);
+ cfq_mark_cfqq_sync(cfqq);
+ }
+ cfqq->pid = current->pid;
+ cfq_log_cfqq(cfqd, cfqq, "alloced");
+ }
+
+ if (new_cfqq)
+ kmem_cache_free(cfq_pool, new_cfqq);
+
+out:
+ WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
+ return cfqq;
+}
+
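+/*
+ * Async queues are shared per (ioprio class, ioprio level) rather than
+ * being per-process; return the slot holding the shared queue pointer.
+ */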
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+ switch (ioprio_class) {
+ case IOPRIO_CLASS_RT:
+ return &cfqd->async_cfqq[0][ioprio];
+ case IOPRIO_CLASS_BE:
+ return &cfqd->async_cfqq[1][ioprio];
+ case IOPRIO_CLASS_IDLE:
+ return &cfqd->async_idle_cfqq;
+ default:
+ BUG();
+ }
+}
+
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+ gfp_t gfp_mask)
+{
+ const int ioprio = task_ioprio(ioc);
+ const int ioprio_class = task_ioprio_class(ioc);
+ struct cfq_queue **async_cfqq = NULL;
+ struct cfq_queue *cfqq = NULL;
+
+ if (!is_sync) {
+ async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+ cfqq = *async_cfqq;
+ }
+
+ if (!cfqq) {
+ cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
+ if (!cfqq)
+ return NULL;
+ }
+
+ /*
+ * pin the queue now that it's allocated, scheduler exit will prune it
+ */
+ if (!is_sync && !(*async_cfqq)) {
+ atomic_inc(&cfqq->ref);
+ *async_cfqq = cfqq;
+ }
+
+ atomic_inc(&cfqq->ref);
+ return cfqq;
+}
+
+/*
+ * We drop cfq io contexts lazily, so we may find a dead one.
+ */
+static void
+cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
+ struct cfq_io_context *cic)
+{
+ unsigned long flags;
+
+ WARN_ON(!list_empty(&cic->queue_list));
+
+ spin_lock_irqsave(&ioc->lock, flags);
+
+ BUG_ON(ioc->ioc_data == cic);
+
+ radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+ hlist_del_rcu(&cic->cic_list);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ cfq_cic_free(cic);
+}
+
+static struct cfq_io_context *
+cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
+{
+ struct cfq_io_context *cic;
+ unsigned long flags;
+ void *k;
+
+ if (unlikely(!ioc))
+ return NULL;
+
+ rcu_read_lock();
+
+ /*
+ * we maintain a last-hit cache, to avoid browsing over the tree
+ */
+ cic = rcu_dereference(ioc->ioc_data);
+ if (cic && cic->key == cfqd) {
+ rcu_read_unlock();
+ return cic;
+ }
+
+ do {
+ cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
+ rcu_read_unlock();
+ if (!cic)
+ break;
+ /* ->key must be copied to avoid race with cfq_exit_queue() */
+ k = cic->key;
+ if (unlikely(!k)) {
+ cfq_drop_dead_cic(cfqd, ioc, cic);
+ rcu_read_lock();
+ continue;
+ }
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ rcu_assign_pointer(ioc->ioc_data, cic);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ break;
+ } while (1);
+
+ return cic;
+}
+
+/*
+ * Add cic into ioc, using cfqd as the search key. This enables us to lookup
+ * the process specific cfq io context when entered from the block layer.
+ * Also adds the cic to a per-cfqd list, used when this queue is removed.
+ */
+static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
+ struct cfq_io_context *cic, gfp_t gfp_mask)
+{
+ unsigned long flags;
+ int ret;
+
+ ret = radix_tree_preload(gfp_mask);
+ if (!ret) {
+ cic->ioc = ioc;
+ cic->key = cfqd;
+
+ spin_lock_irqsave(&ioc->lock, flags);
+ ret = radix_tree_insert(&ioc->radix_root,
+ (unsigned long) cfqd, cic);
+ if (!ret)
+ hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+
+ radix_tree_preload_end();
+
+ if (!ret) {
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+ list_add(&cic->queue_list, &cfqd->cic_list);
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+ }
+ }
+
+ if (ret)
+ printk(KERN_ERR "cfq: cic link failed!\n");
+
+ return ret;
+}
+
+/*
+ * Setup general io context and cfq io context. There can be several cfq
+ * io contexts per general io context, if this process is doing io to more
+ * than one device managed by cfq.
+ */
+static struct cfq_io_context *
+cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
+{
+ struct io_context *ioc = NULL;
+ struct cfq_io_context *cic;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ ioc = get_io_context(gfp_mask, cfqd->queue->node);
+ if (!ioc)
+ return NULL;
+
+ cic = cfq_cic_lookup(cfqd, ioc);
+ if (cic)
+ goto out;
+
+ cic = cfq_alloc_io_context(cfqd, gfp_mask);
+ if (cic == NULL)
+ goto err;
+
+ if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+ goto err_free;
+
+out:
+ smp_read_barrier_depends();
+ if (unlikely(ioc->ioprio_changed))
+ cfq_ioc_set_ioprio(ioc);
+
+ return cic;
+err_free:
+ cfq_cic_free(cic);
+err:
+ put_io_context(ioc);
+ return NULL;
+}
+
+static void
+cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
+{
+ unsigned long elapsed = jiffies - cic->last_end_request;
+ unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
+
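+	/*
+	 * 7/8 decaying averages; samples and totals carry a fixed-point scale
+	 * of 256, which cancels out so that ttime_mean stays in jiffies.
+	 */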
+ cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
+ cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
+ cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
+}
+
+static void
+cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
+ struct request *rq)
+{
+ sector_t sdist;
+ u64 total;
+
+ if (cic->last_request_pos < rq->sector)
+ sdist = rq->sector - cic->last_request_pos;
+ else
+ sdist = cic->last_request_pos - rq->sector;
+
+ /*
+ * Don't allow the seek distance to get too large from the
+ * odd fragment, pagein, etc
+ */
+ if (cic->seek_samples <= 60) /* second&third seek */
+ sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
+ else
+ sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
+
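+	/*
+	 * Same 7/8 decaying average as the thinktime tracking above;
+	 * seek_mean ends up as the approximate mean seek distance in sectors.
+	 */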
+ cic->seek_samples = (7*cic->seek_samples + 256) / 8;
+ cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
+ total = cic->seek_total + (cic->seek_samples/2);
+ do_div(total, cic->seek_samples);
+ cic->seek_mean = (sector_t)total;
+}
+
+/*
+ * Disable idle window if the process thinks too long or seeks so much that
+ * it doesn't matter
+ */
+static void
+cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct cfq_io_context *cic)
+{
+ int old_idle, enable_idle;
+
+ /*
+ * Don't idle for async or idle io prio class
+ */
+ if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
+ return;
+
+ enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
+
+ if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
+ (cfqd->hw_tag && CIC_SEEKY(cic)))
+ enable_idle = 0;
+ else if (sample_valid(cic->ttime_samples)) {
+ if (cic->ttime_mean > cfqd->cfq_slice_idle)
+ enable_idle = 0;
+ else
+ enable_idle = 1;
+ }
+
+ if (old_idle != enable_idle) {
+ cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
+ if (enable_idle)
+ cfq_mark_cfqq_idle_window(cfqq);
+ else
+ cfq_clear_cfqq_idle_window(cfqq);
+ }
+}
+
+/*
+ * Check if new_cfqq should preempt the currently active queue. Return 0 for
+ * no (or if we aren't sure); 1 will cause a preempt.
+ */
+static int
+cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
+ struct request *rq)
+{
+ struct cfq_queue *cfqq;
+
+ cfqq = cfqd->active_queue;
+ if (!cfqq)
+ return 0;
+
+ if (cfq_slice_used(cfqq))
+ return 1;
+
+ if (cfq_class_idle(new_cfqq))
+ return 0;
+
+ if (cfq_class_idle(cfqq))
+ return 1;
+
+ /*
+ * if the new request is sync, but the currently running queue is
+ * not, let the sync request have priority.
+ */
+ if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
+ return 1;
+
+ /*
+ * So both queues are sync. Let the new request get disk time if
+ * it's a metadata request and the current queue is doing regular IO.
+ */
+ if (rq_is_meta(rq) && !cfqq->meta_pending)
+ return 1;
+
+ /*
+ * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+ */
+ if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+ return 1;
+
+ if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
+ return 0;
+
+ /*
+ * if this request is as-good as one we would expect from the
+ * current cfqq, let it preempt
+ */
+ if (cfq_rq_close(cfqd, rq))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * cfqq preempts the active queue. if we allowed preempt with no slice left,
+ * let it have half of its nominal slice.
+ */
+static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
+
+ /*
+	 * Put the new queue at the front of the current list,
+ * so we know that it will be selected next.
+ */
+ BUG_ON(!cfq_cfqq_on_rr(cfqq));
+
+ cfq_service_tree_add(cfqd, cfqq, 1);
+
+ cfqq->slice_end = 0;
+ cfq_mark_cfqq_slice_new(cfqq);
+}
+
+/*
+ * Called when a new fs request (rq) is added (to cfqq). Check if there's
+ * something we should do about it
+ */
+static void
+cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+ struct request *rq)
+{
+ struct cfq_io_context *cic = RQ_CIC(rq);
+
+ cfqd->rq_queued++;
+ if (rq_is_meta(rq))
+ cfqq->meta_pending++;
+
+ cfq_update_io_thinktime(cfqd, cic);
+ cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_idle_window(cfqd, cfqq, cic);
+
+ cic->last_request_pos = rq->sector + rq->nr_sectors;
+
+ if (cfqq == cfqd->active_queue) {
+ /*
+ * if we are waiting for a request for this queue, let it rip
+ * immediately and flag that we must not expire this queue
+ * just now
+ */
+ if (cfq_cfqq_wait_request(cfqq)) {
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ del_timer(&cfqd->idle_slice_timer);
+ blk_start_queueing(cfqd->queue);
+ }
+ } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
+ /*
+ * not the active queue - expire current slice if it is
+		 * idle and has expired its mean thinktime or this new queue
+ * has some old slice time left and is of higher priority or
+ * this new queue is RT and the current one is BE
+ */
+ cfq_preempt_queue(cfqd, cfqq);
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ blk_start_queueing(cfqd->queue);
+ }
+}
+
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ cfq_log_cfqq(cfqd, cfqq, "insert_request");
+ cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
+
+ cfq_add_rq_rb(rq);
+
+ list_add_tail(&rq->queuelist, &cfqq->fifo);
+
+ cfq_rq_enqueued(cfqd, cfqq, rq);
+}
+
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+ if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
+ cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+
+ if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+ cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ return;
+
+ if (cfqd->hw_tag_samples++ < 50)
+ return;
+
+ if (cfqd->rq_in_driver_peak >= CFQ_HW_QUEUE_MIN)
+ cfqd->hw_tag = 1;
+ else
+ cfqd->hw_tag = 0;
+
+ cfqd->hw_tag_samples = 0;
+ cfqd->rq_in_driver_peak = 0;
+}
+
+static void cfq_completed_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+ struct cfq_data *cfqd = cfqq->cfqd;
+ const int sync = rq_is_sync(rq);
+ unsigned long now;
+
+ now = jiffies;
+ cfq_log_cfqq(cfqd, cfqq, "complete");
+
+ cfq_update_hw_tag(cfqd);
+
+ WARN_ON(!cfqd->rq_in_driver);
+ WARN_ON(!cfqq->dispatched);
+ cfqd->rq_in_driver--;
+ cfqq->dispatched--;
+
+ if (cfq_cfqq_sync(cfqq))
+ cfqd->sync_flight--;
+
+ if (!cfq_class_idle(cfqq))
+ cfqd->last_end_request = now;
+
+ if (sync)
+ RQ_CIC(rq)->last_end_request = now;
+
+ /*
+ * If this is the active queue, check if it needs to be expired,
+ * or if we want to idle in case it has no pending requests.
+ */
+ if (cfqd->active_queue == cfqq) {
+ if (cfq_cfqq_slice_new(cfqq)) {
+ cfq_set_prio_slice(cfqd, cfqq);
+ cfq_clear_cfqq_slice_new(cfqq);
+ }
+ if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
+ cfq_slice_expired(cfqd, 1);
+ else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
+ cfq_arm_slice_timer(cfqd);
+ }
+
+ if (!cfqd->rq_in_driver)
+ cfq_schedule_dispatch(cfqd);
+}
+
+/*
+ * we temporarily boost lower priority queues if they are holding fs exclusive
+ * resources. they are boosted to normal prio (CLASS_BE/4)
+ */
+static void cfq_prio_boost(struct cfq_queue *cfqq)
+{
+ if (has_fs_excl()) {
+ /*
+ * boost idle prio on transactions that would lock out other
+ * users of the filesystem
+ */
+ if (cfq_class_idle(cfqq))
+ cfqq->ioprio_class = IOPRIO_CLASS_BE;
+ if (cfqq->ioprio > IOPRIO_NORM)
+ cfqq->ioprio = IOPRIO_NORM;
+ } else {
+ /*
+ * check if we need to unboost the queue
+ */
+ if (cfqq->ioprio_class != cfqq->org_ioprio_class)
+ cfqq->ioprio_class = cfqq->org_ioprio_class;
+ if (cfqq->ioprio != cfqq->org_ioprio)
+ cfqq->ioprio = cfqq->org_ioprio;
+ }
+}
+
+static inline int __cfq_may_queue(struct cfq_queue *cfqq)
+{
+ if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
+ !cfq_cfqq_must_alloc_slice(cfqq)) {
+ cfq_mark_cfqq_must_alloc_slice(cfqq);
+ return ELV_MQUEUE_MUST;
+ }
+
+ return ELV_MQUEUE_MAY;
+}
+
+static int cfq_may_queue(struct request_queue *q, int rw)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct task_struct *tsk = current;
+ struct cfq_io_context *cic;
+ struct cfq_queue *cfqq;
+
+ /*
+ * don't force setup of a queue from here, as a call to may_queue
+ * does not necessarily imply that a request actually will be queued.
+ * so just lookup a possibly existing queue, or return 'may queue'
+ * if that fails
+ */
+ cic = cfq_cic_lookup(cfqd, tsk->io_context);
+ if (!cic)
+ return ELV_MQUEUE_MAY;
+
+ cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC);
+ if (cfqq) {
+ cfq_init_prio_data(cfqq, cic->ioc);
+ cfq_prio_boost(cfqq);
+
+ return __cfq_may_queue(cfqq);
+ }
+
+ return ELV_MQUEUE_MAY;
+}
+
+/*
+ * queue lock held here
+ */
+static void cfq_put_request(struct request *rq)
+{
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ if (cfqq) {
+ const int rw = rq_data_dir(rq);
+
+ BUG_ON(!cfqq->allocated[rw]);
+ cfqq->allocated[rw]--;
+
+ put_io_context(RQ_CIC(rq)->ioc);
+
+ rq->elevator_private = NULL;
+ rq->elevator_private2 = NULL;
+
+ cfq_put_queue(cfqq);
+ }
+}
+
+/*
+ * Allocate cfq data structures associated with this request.
+ */
+static int
+cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_io_context *cic;
+ const int rw = rq_data_dir(rq);
+ const int is_sync = rq_is_sync(rq);
+ struct cfq_queue *cfqq;
+ unsigned long flags;
+
+ might_sleep_if(gfp_mask & __GFP_WAIT);
+
+ cic = cfq_get_io_context(cfqd, gfp_mask);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ if (!cic)
+ goto queue_fail;
+
+ cfqq = cic_to_cfqq(cic, is_sync);
+ if (!cfqq) {
+ cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
+
+ if (!cfqq)
+ goto queue_fail;
+
+ cic_set_cfqq(cic, cfqq, is_sync);
+ }
+
+ cfqq->allocated[rw]++;
+ cfq_clear_cfqq_must_alloc(cfqq);
+ atomic_inc(&cfqq->ref);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ rq->elevator_private = cic;
+ rq->elevator_private2 = cfqq;
+ return 0;
+
+queue_fail:
+ if (cic)
+ put_io_context(cic->ioc);
+
+ cfq_schedule_dispatch(cfqd);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ cfq_log(cfqd, "set_request fail");
+ return 1;
+}
+
+static void cfq_kick_queue(struct work_struct *work)
+{
+ struct cfq_data *cfqd =
+ container_of(work, struct cfq_data, unplug_work);
+ struct request_queue *q = cfqd->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queueing(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * Timer running if the active_queue is currently idling inside its time slice
+ */
+static void cfq_idle_slice_timer(unsigned long data)
+{
+ struct cfq_data *cfqd = (struct cfq_data *) data;
+ struct cfq_queue *cfqq;
+ unsigned long flags;
+ int timed_out = 1;
+
+ cfq_log(cfqd, "idle timer fired");
+
+ spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+ cfqq = cfqd->active_queue;
+ if (cfqq) {
+ timed_out = 0;
+
+ /*
+ * expired
+ */
+ if (cfq_slice_used(cfqq))
+ goto expire;
+
+ /*
+ * only expire and reinvoke request handler, if there are
+ * other queues with pending requests
+ */
+ if (!cfqd->busy_queues)
+ goto out_cont;
+
+ /*
+ * not expired and it has a request pending, let it dispatch
+ */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ goto out_kick;
+ }
+ }
+expire:
+ cfq_slice_expired(cfqd, timed_out);
+out_kick:
+ cfq_schedule_dispatch(cfqd);
+out_cont:
+ spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
+{
+ del_timer_sync(&cfqd->idle_slice_timer);
+ cancel_work_sync(&cfqd->unplug_work);
+}
+
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+ int i;
+
+ for (i = 0; i < IOPRIO_BE_NR; i++) {
+ if (cfqd->async_cfqq[0][i])
+ cfq_put_queue(cfqd->async_cfqq[0][i]);
+ if (cfqd->async_cfqq[1][i])
+ cfq_put_queue(cfqd->async_cfqq[1][i]);
+ }
+
+ if (cfqd->async_idle_cfqq)
+ cfq_put_queue(cfqd->async_idle_cfqq);
+}
+
+static void cfq_exit_queue(struct elevator_queue *e)
+{
+ struct cfq_data *cfqd = e->elevator_data;
+ struct request_queue *q = cfqd->queue;
+
+ cfq_shutdown_timer_wq(cfqd);
+
+ spin_lock_irq(q->queue_lock);
+
+ if (cfqd->active_queue)
+ __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+
+ while (!list_empty(&cfqd->cic_list)) {
+ struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
+ struct cfq_io_context,
+ queue_list);
+
+ __cfq_exit_single_io_context(cfqd, cic);
+ }
+
+ cfq_put_async_queues(cfqd);
+
+ spin_unlock_irq(q->queue_lock);
+
+ cfq_shutdown_timer_wq(cfqd);
+
+ kfree(cfqd);
+}
+
+static void *cfq_init_queue(struct request_queue *q)
+{
+ struct cfq_data *cfqd;
+
+ cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!cfqd)
+ return NULL;
+
+ cfqd->service_tree = CFQ_RB_ROOT;
+ INIT_LIST_HEAD(&cfqd->cic_list);
+
+ cfqd->queue = q;
+
+ init_timer(&cfqd->idle_slice_timer);
+ cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
+ cfqd->idle_slice_timer.data = (unsigned long) cfqd;
+
+ INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
+
+ cfqd->last_end_request = jiffies;
+ cfqd->cfq_quantum = cfq_quantum;
+ cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
+ cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
+ cfqd->cfq_back_max = cfq_back_max;
+ cfqd->cfq_back_penalty = cfq_back_penalty;
+ cfqd->cfq_slice[0] = cfq_slice_async;
+ cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
+ cfqd->cfq_slice_idle = cfq_slice_idle;
+ cfqd->hw_tag = 1;
+
+ return cfqd;
+}
+
+static void cfq_slab_kill(void)
+{
+ /*
+ * Caller already ensured that pending RCU callbacks are completed,
+ * so we should have no busy allocations at this point.
+ */
+ if (cfq_pool)
+ kmem_cache_destroy(cfq_pool);
+ if (cfq_ioc_pool)
+ kmem_cache_destroy(cfq_ioc_pool);
+}
+
+static int __init cfq_slab_setup(void)
+{
+ cfq_pool = KMEM_CACHE(cfq_queue, 0);
+ if (!cfq_pool)
+ goto fail;
+
+ cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
+ if (!cfq_ioc_pool)
+ goto fail;
+
+ return 0;
+fail:
+ cfq_slab_kill();
+ return -ENOMEM;
+}
+
+/*
+ * sysfs parts below -->
+ */
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtoul(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct cfq_data *cfqd = e->elevator_data; \
+ unsigned int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return cfq_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
+SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
+SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
+SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
+SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct cfq_data *cfqd = e->elevator_data; \
+ unsigned int __data; \
+ int ret = cfq_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+ UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+ UINT_MAX, 1);
+STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+ UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+ UINT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define CFQ_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
+
+static struct elv_fs_entry cfq_attrs[] = {
+ CFQ_ATTR(quantum),
+ CFQ_ATTR(fifo_expire_sync),
+ CFQ_ATTR(fifo_expire_async),
+ CFQ_ATTR(back_seek_max),
+ CFQ_ATTR(back_seek_penalty),
+ CFQ_ATTR(slice_sync),
+ CFQ_ATTR(slice_async),
+ CFQ_ATTR(slice_async_rq),
+ CFQ_ATTR(slice_idle),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_cfq = {
+ .ops = {
+ .elevator_merge_fn = cfq_merge,
+ .elevator_merged_fn = cfq_merged_request,
+ .elevator_merge_req_fn = cfq_merged_requests,
+ .elevator_allow_merge_fn = cfq_allow_merge,
+ .elevator_dispatch_fn = cfq_dispatch_requests,
+ .elevator_add_req_fn = cfq_insert_request,
+ .elevator_activate_req_fn = cfq_activate_request,
+ .elevator_deactivate_req_fn = cfq_deactivate_request,
+ .elevator_queue_empty_fn = cfq_queue_empty,
+ .elevator_completed_req_fn = cfq_completed_request,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_set_req_fn = cfq_set_request,
+ .elevator_put_req_fn = cfq_put_request,
+ .elevator_may_queue_fn = cfq_may_queue,
+ .elevator_init_fn = cfq_init_queue,
+ .elevator_exit_fn = cfq_exit_queue,
+ .trim = cfq_free_io_context,
+ },
+ .elevator_attrs = cfq_attrs,
+ .elevator_name = "cfq",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init cfq_init(void)
+{
+ /*
+ * could be 0 on HZ < 1000 setups
+ */
+ if (!cfq_slice_async)
+ cfq_slice_async = 1;
+ if (!cfq_slice_idle)
+ cfq_slice_idle = 1;
+
+ if (cfq_slab_setup())
+ return -ENOMEM;
+
+ elv_register(&iosched_cfq);
+
+ return 0;
+}
+
+static void __exit cfq_exit(void)
+{
+ DECLARE_COMPLETION_ONSTACK(all_gone);
+ elv_unregister(&iosched_cfq);
+ ioc_gone = &all_gone;
+ /* ioc_gone's update must be visible before reading ioc_count */
+ smp_wmb();
+
+ /*
+ * this also protects us from entering cfq_slab_kill() with
+ * pending RCU callbacks
+ */
+ if (elv_ioc_count_read(ioc_count))
+ wait_for_completion(&all_gone);
+ cfq_slab_kill();
+}
+
+module_init(cfq_init);
+module_exit(cfq_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/libdde-linux26/contrib/block/deadline-iosched.c b/libdde-linux26/contrib/block/deadline-iosched.c
new file mode 100644
index 00000000..9bb6f050
--- /dev/null
+++ b/libdde-linux26/contrib/block/deadline-iosched.c
@@ -0,0 +1,478 @@
+/*
+ * Deadline i/o scheduler.
+ *
+ * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <ddekit/timer.h>
+
+/*
+ * See Documentation/block/deadline-iosched.txt
+ */
+static const int read_expire = HZ / 2; /* max time before a read is submitted. */
+static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static const int writes_starved = 2; /* max times reads can starve a write */
+static const int fifo_batch = 16; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
+
+struct deadline_data {
+ /*
+ * run time data
+ */
+
+ /*
+ * requests (deadline_rq s) are present on both sort_list and fifo_list
+ */
+ struct rb_root sort_list[2];
+ struct list_head fifo_list[2];
+
+ /*
+ * next in sort order. read, write or both are NULL
+ */
+ struct request *next_rq[2];
+ unsigned int batching; /* number of sequential requests made */
+ sector_t last_sector; /* head position */
+ unsigned int starved; /* times reads have starved writes */
+
+ /*
+ * settings that change how the i/o scheduler behaves
+ */
+ int fifo_expire[2];
+ int fifo_batch;
+ int writes_starved;
+ int front_merges;
+};
+
+static void deadline_move_request(struct deadline_data *, struct request *);
+
+static inline struct rb_root *
+deadline_rb_root(struct deadline_data *dd, struct request *rq)
+{
+ return &dd->sort_list[rq_data_dir(rq)];
+}
+
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+ struct rb_node *node = rb_next(&rq->rb_node);
+
+ if (node)
+ return rb_entry_rq(node);
+
+ return NULL;
+}
+
+static void
+deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
+{
+ struct rb_root *root = deadline_rb_root(dd, rq);
+ struct request *__alias;
+
+ while (unlikely(__alias = elv_rb_add(root, rq)))
+ deadline_move_request(dd, __alias);
+}
+
+static inline void
+deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
+{
+ const int data_dir = rq_data_dir(rq);
+
+ if (dd->next_rq[data_dir] == rq)
+ dd->next_rq[data_dir] = deadline_latter_request(rq);
+
+ elv_rb_del(deadline_rb_root(dd, rq), rq);
+}
+
+/*
+ * add rq to rbtree and fifo
+ */
+static void
+deadline_add_request(struct request_queue *q, struct request *rq)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const int data_dir = rq_data_dir(rq);
+
+ deadline_add_rq_rb(dd, rq);
+
+ /*
+ * set expire time and add to fifo list
+ */
+ rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+ list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
+}
+
+/*
+ * remove rq from rbtree and fifo.
+ */
+static void deadline_remove_request(struct request_queue *q, struct request *rq)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ rq_fifo_clear(rq);
+ deadline_del_rq_rb(dd, rq);
+}
+
+static int
+deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ struct request *__rq;
+ int ret;
+
+ /*
+ * check for front merge
+ */
+ if (dd->front_merges) {
+ sector_t sector = bio->bi_sector + bio_sectors(bio);
+
+ __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
+ if (__rq) {
+ BUG_ON(sector != __rq->sector);
+
+ if (elv_rq_merge_ok(__rq, bio)) {
+ ret = ELEVATOR_FRONT_MERGE;
+ goto out;
+ }
+ }
+ }
+
+ return ELEVATOR_NO_MERGE;
+out:
+ *req = __rq;
+ return ret;
+}
+
+static void deadline_merged_request(struct request_queue *q,
+ struct request *req, int type)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ /*
+ * if the merge was a front merge, we need to reposition request
+ */
+ if (type == ELEVATOR_FRONT_MERGE) {
+ elv_rb_del(deadline_rb_root(dd, req), req);
+ deadline_add_rq_rb(dd, req);
+ }
+}
+
+static void
+deadline_merged_requests(struct request_queue *q, struct request *req,
+ struct request *next)
+{
+ /*
+ * if next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo
+ */
+ if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+ list_move(&req->queuelist, &next->queuelist);
+ rq_set_fifo_time(req, rq_fifo_time(next));
+ }
+ }
+
+ /*
+ * kill knowledge of next, this one is a goner
+ */
+ deadline_remove_request(q, next);
+}
+
+/*
+ * move request from sort list to dispatch queue.
+ */
+static inline void
+deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ deadline_remove_request(q, rq);
+ elv_dispatch_add_tail(q, rq);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct request *rq)
+{
+ const int data_dir = rq_data_dir(rq);
+
+ dd->next_rq[READ] = NULL;
+ dd->next_rq[WRITE] = NULL;
+ dd->next_rq[data_dir] = deadline_latter_request(rq);
+
+ dd->last_sector = rq_end_sector(rq);
+
+ /*
+ * take it off the sort and fifo list, move
+ * to dispatch queue
+ */
+ deadline_move_to_dispatch(dd, rq);
+}
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
+ */
+static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
+{
+ struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
+
+ /*
+ * rq is expired!
+ */
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static int deadline_dispatch_requests(struct request_queue *q, int force)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+ const int reads = !list_empty(&dd->fifo_list[READ]);
+ const int writes = !list_empty(&dd->fifo_list[WRITE]);
+ struct request *rq;
+ int data_dir;
+
+ /*
+ * batches are currently reads XOR writes
+ */
+ if (dd->next_rq[WRITE])
+ rq = dd->next_rq[WRITE];
+ else
+ rq = dd->next_rq[READ];
+
+ if (rq && dd->batching < dd->fifo_batch)
+ /* we have a next request and are still entitled to batch */
+ goto dispatch_request;
+
+ /*
+ * at this point we are not running a batch. select the appropriate
+ * data direction (read / write)
+ */
+
+ if (reads) {
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
+
+ if (writes && (dd->starved++ >= dd->writes_starved))
+ goto dispatch_writes;
+
+ data_dir = READ;
+
+ goto dispatch_find_request;
+ }
+
+ /*
+ * either there are no reads, or writes have been starved
+ */
+
+ if (writes) {
+dispatch_writes:
+ BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
+
+ dd->starved = 0;
+
+ data_dir = WRITE;
+
+ goto dispatch_find_request;
+ }
+
+ return 0;
+
+dispatch_find_request:
+ /*
+ * we are not running a batch, find best request for selected data_dir
+ */
+ if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+ /*
+ * A deadline has expired, the last request was in the other
+ * direction, or we have run out of higher-sectored requests.
+ * Start again from the request with the earliest expiry time.
+ */
+ rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
+ } else {
+ /*
+ * The last req was the same dir and we have a next request in
+ * sort order. No expired requests so continue on from here.
+ */
+ rq = dd->next_rq[data_dir];
+ }
+
+ dd->batching = 0;
+
+dispatch_request:
+ /*
+ * rq is the selected appropriate request.
+ */
+ dd->batching++;
+ deadline_move_request(dd, rq);
+
+ return 1;
+}
+
+static int deadline_queue_empty(struct request_queue *q)
+{
+ struct deadline_data *dd = q->elevator->elevator_data;
+
+ return list_empty(&dd->fifo_list[WRITE])
+ && list_empty(&dd->fifo_list[READ]);
+}
+
+static void deadline_exit_queue(struct elevator_queue *e)
+{
+ struct deadline_data *dd = e->elevator_data;
+
+ BUG_ON(!list_empty(&dd->fifo_list[READ]));
+ BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
+
+ kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data).
+ */
+static void *deadline_init_queue(struct request_queue *q)
+{
+ struct deadline_data *dd;
+
+ dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (!dd)
+ return NULL;
+
+ INIT_LIST_HEAD(&dd->fifo_list[READ]);
+ INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+ dd->sort_list[READ] = RB_ROOT;
+ dd->sort_list[WRITE] = RB_ROOT;
+ dd->fifo_expire[READ] = read_expire;
+ dd->fifo_expire[WRITE] = write_expire;
+ dd->writes_starved = writes_starved;
+ dd->front_merges = 1;
+ dd->fifo_batch = fifo_batch;
+ return dd;
+}
+
+/*
+ * sysfs parts below
+ */
+
+static ssize_t
+deadline_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+deadline_var_store(int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtol(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return deadline_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct deadline_data *dd = e->elevator_data; \
+ int __data; \
+ int ret = deadline_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
+ deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+ DD_ATTR(read_expire),
+ DD_ATTR(write_expire),
+ DD_ATTR(writes_starved),
+ DD_ATTR(front_merges),
+ DD_ATTR(fifo_batch),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_deadline = {
+ .ops = {
+ .elevator_merge_fn = deadline_merge,
+ .elevator_merged_fn = deadline_merged_request,
+ .elevator_merge_req_fn = deadline_merged_requests,
+ .elevator_dispatch_fn = deadline_dispatch_requests,
+ .elevator_add_req_fn = deadline_add_request,
+ .elevator_queue_empty_fn = deadline_queue_empty,
+ .elevator_former_req_fn = elv_rb_former_request,
+ .elevator_latter_req_fn = elv_rb_latter_request,
+ .elevator_init_fn = deadline_init_queue,
+ .elevator_exit_fn = deadline_exit_queue,
+ },
+
+ .elevator_attrs = deadline_attrs,
+ .elevator_name = "deadline",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init deadline_init(void)
+{
+ elv_register(&iosched_deadline);
+
+ return 0;
+}
+
+static void __exit deadline_exit(void)
+{
+ elv_unregister(&iosched_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/libdde-linux26/contrib/block/elevator.c b/libdde-linux26/contrib/block/elevator.c
new file mode 100644
index 00000000..0287fafd
--- /dev/null
+++ b/libdde-linux26/contrib/block/elevator.c
@@ -0,0 +1,1231 @@
+/*
+ * Block device elevator/IO-scheduler.
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@kernel.dk> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one or even write a new "plug in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ * an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
+ * 20082000 Dave Jones <davej@suse.de> :
+ * Removed tests for max-bomb-segments, which was breaking elvtune
+ * when run without -bN
+ *
+ * Jens:
+ * - Rework again to work with bio instead of buffer_heads
+ * - lose bi_dev comparisons, partition handling is right now
+ * - completely modularize elevator setup and teardown
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/blktrace_api.h>
+#include <trace/block.h>
+#include <linux/hash.h>
+#include <linux/uaccess.h>
+
+#include "blk.h"
+#include "local.h"
+
+static DEFINE_SPINLOCK(elv_list_lock);
+static LIST_HEAD(elv_list);
+
+DEFINE_TRACE(block_rq_abort);
+
+/*
+ * Merge hash stuff.
+ */
+static const int elv_hash_shift = 6;
+#define ELV_HASH_BLOCK(sec) ((sec) >> 3)
+#define ELV_HASH_FN(sec) \
+ (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
+#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
+#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
+#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
+
+DEFINE_TRACE(block_rq_insert);
+DEFINE_TRACE(block_rq_issue);
+
+/*
+ * Query the io scheduler to see if the bio the current process is issuing
+ * may be merged with rq.
+ */
+static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
+{
+ struct request_queue *q = rq->q;
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_allow_merge_fn)
+ return e->ops->elevator_allow_merge_fn(q, rq, bio);
+
+ return 1;
+}
+
+/*
+ * can we safely merge with this request?
+ */
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+ if (!rq_mergeable(rq))
+ return 0;
+
+ /*
+ * Don't merge file system requests and discard requests
+ */
+ if (bio_discard(bio) != bio_discard(rq->bio))
+ return 0;
+
+ /*
+ * different data direction or already started, don't merge
+ */
+ if (bio_data_dir(bio) != rq_data_dir(rq))
+ return 0;
+
+ /*
+ * must be same device and not a special request
+ */
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ return 0;
+
+ /*
+ * only merge integrity protected bio into ditto rq
+ */
+ if (bio_integrity(bio) != blk_integrity_rq(rq))
+ return 0;
+
+ if (!elv_iosched_allow_merge(rq, bio))
+ return 0;
+
+ return 1;
+}
+EXPORT_SYMBOL(elv_rq_merge_ok);
+
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
+{
+ int ret = ELEVATOR_NO_MERGE;
+
+ /*
+ * we can merge and sequence is ok, check if it's possible
+ */
+ if (elv_rq_merge_ok(__rq, bio)) {
+ if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+ ret = ELEVATOR_BACK_MERGE;
+ else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+ ret = ELEVATOR_FRONT_MERGE;
+ }
+
+ return ret;
+}
+
+static struct elevator_type *elevator_find(const char *name)
+{
+ struct elevator_type *e;
+
+ list_for_each_entry(e, &elv_list, list) {
+ if (!strcmp(e->elevator_name, name))
+ return e;
+ }
+
+ return NULL;
+}
+
+static void elevator_put(struct elevator_type *e)
+{
+ module_put(e->elevator_owner);
+}
+
+static struct elevator_type *elevator_get(const char *name)
+{
+ struct elevator_type *e;
+
+ spin_lock(&elv_list_lock);
+
+ e = elevator_find(name);
+ if (!e) {
+ char elv[ELV_NAME_MAX + strlen("-iosched")];
+
+ spin_unlock(&elv_list_lock);
+
+ if (!strcmp(name, "anticipatory"))
+ sprintf(elv, "as-iosched");
+ else
+ sprintf(elv, "%s-iosched", name);
+
+ request_module("%s", elv);
+ spin_lock(&elv_list_lock);
+ e = elevator_find(name);
+ }
+
+ if (e && !try_module_get(e->elevator_owner))
+ e = NULL;
+
+ spin_unlock(&elv_list_lock);
+
+ return e;
+}
+
+static void *elevator_init_queue(struct request_queue *q,
+ struct elevator_queue *eq)
+{
+ return eq->ops->elevator_init_fn(q);
+}
+
+static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
+ void *data)
+{
+ q->elevator = eq;
+ eq->elevator_data = data;
+}
+
+static char chosen_elevator[16];
+
+static int __init elevator_setup(char *str)
+{
+ /*
+ * Be backwards-compatible with previous kernels, so users
+ * won't get the wrong elevator.
+ */
+ if (!strcmp(str, "as"))
+ strcpy(chosen_elevator, "anticipatory");
+ else
+ strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+ return 1;
+}
+
+__setup("elevator=", elevator_setup);
+
+static struct kobj_type elv_ktype;
+
+static struct elevator_queue *elevator_alloc(struct request_queue *q,
+ struct elevator_type *e)
+{
+ struct elevator_queue *eq;
+ int i;
+
+ eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
+ if (unlikely(!eq))
+ goto err;
+
+ eq->ops = &e->ops;
+ eq->elevator_type = e;
+ kobject_init(&eq->kobj, &elv_ktype);
+ mutex_init(&eq->sysfs_lock);
+
+ eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
+ GFP_KERNEL, q->node);
+ if (!eq->hash)
+ goto err;
+
+ for (i = 0; i < ELV_HASH_ENTRIES; i++)
+ INIT_HLIST_HEAD(&eq->hash[i]);
+
+ return eq;
+err:
+ kfree(eq);
+ elevator_put(e);
+ return NULL;
+}
+
+static void elevator_release(struct kobject *kobj)
+{
+ struct elevator_queue *e;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ elevator_put(e->elevator_type);
+ kfree(e->hash);
+ kfree(e);
+}
+
+int elevator_init(struct request_queue *q, char *name)
+{
+ struct elevator_type *e = NULL;
+ struct elevator_queue *eq;
+ int ret = 0;
+ void *data;
+
+ INIT_LIST_HEAD(&q->queue_head);
+ q->last_merge = NULL;
+ q->end_sector = 0;
+ q->boundary_rq = NULL;
+
+ if (name) {
+ e = elevator_get(name);
+ if (!e)
+ return -EINVAL;
+ }
+
+ if (!e && *chosen_elevator) {
+ e = elevator_get(chosen_elevator);
+ if (!e)
+ printk(KERN_ERR "I/O scheduler %s not found\n",
+ chosen_elevator);
+ }
+
+ if (!e) {
+ e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+ if (!e) {
+ printk(KERN_ERR
+ "Default I/O scheduler not found. " \
+ "Using noop.\n");
+ e = elevator_get("noop");
+ }
+ }
+
+ eq = elevator_alloc(q, e);
+ if (!eq)
+ return -ENOMEM;
+
+ data = elevator_init_queue(q, eq);
+ if (!data) {
+ kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+
+ elevator_attach(q, eq, data);
+ return ret;
+}
+EXPORT_SYMBOL(elevator_init);
+
+void elevator_exit(struct elevator_queue *e)
+{
+ mutex_lock(&e->sysfs_lock);
+ if (e->ops->elevator_exit_fn)
+ e->ops->elevator_exit_fn(e);
+ e->ops = NULL;
+ mutex_unlock(&e->sysfs_lock);
+
+ kobject_put(&e->kobj);
+}
+EXPORT_SYMBOL(elevator_exit);
+
+static void elv_activate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_activate_req_fn)
+ e->ops->elevator_activate_req_fn(q, rq);
+}
+
+static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_deactivate_req_fn)
+ e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
+static inline void __elv_rqhash_del(struct request *rq)
+{
+ hlist_del_init(&rq->hash);
+}
+
+static void elv_rqhash_del(struct request_queue *q, struct request *rq)
+{
+ if (ELV_ON_HASH(rq))
+ __elv_rqhash_del(rq);
+}
+
+static void elv_rqhash_add(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ BUG_ON(ELV_ON_HASH(rq));
+ hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
+}
+
+static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
+{
+ __elv_rqhash_del(rq);
+ elv_rqhash_add(q, rq);
+}
+
+static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
+{
+ struct elevator_queue *e = q->elevator;
+ struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
+ struct hlist_node *entry, *next;
+ struct request *rq;
+
+ hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
+ BUG_ON(!ELV_ON_HASH(rq));
+
+ if (unlikely(!rq_mergeable(rq))) {
+ __elv_rqhash_del(rq);
+ continue;
+ }
+
+ if (rq_hash_key(rq) == offset)
+ return rq;
+ }
+
+ return NULL;
+}
+
+/*
+ * RB-tree support functions for inserting/lookup/removal of requests
+ * in a sorted RB tree.
+ */
+struct request *elv_rb_add(struct rb_root *root, struct request *rq)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct request *__rq;
+
+ while (*p) {
+ parent = *p;
+ __rq = rb_entry(parent, struct request, rb_node);
+
+ if (rq->sector < __rq->sector)
+ p = &(*p)->rb_left;
+ else if (rq->sector > __rq->sector)
+ p = &(*p)->rb_right;
+ else
+ return __rq;
+ }
+
+ rb_link_node(&rq->rb_node, parent, p);
+ rb_insert_color(&rq->rb_node, root);
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_add);
+
+void elv_rb_del(struct rb_root *root, struct request *rq)
+{
+ BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
+ rb_erase(&rq->rb_node, root);
+ RB_CLEAR_NODE(&rq->rb_node);
+}
+EXPORT_SYMBOL(elv_rb_del);
+
+struct request *elv_rb_find(struct rb_root *root, sector_t sector)
+{
+ struct rb_node *n = root->rb_node;
+ struct request *rq;
+
+ while (n) {
+ rq = rb_entry(n, struct request, rb_node);
+
+ if (sector < rq->sector)
+ n = n->rb_left;
+ else if (sector > rq->sector)
+ n = n->rb_right;
+ else
+ return rq;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_find);
+
+/*
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. rq is sorted into the dispatch queue rather than appended at the
+ * tail. To be used by specific elevators.
+ */
+void elv_dispatch_sort(struct request_queue *q, struct request *rq)
+{
+ sector_t boundary;
+ struct list_head *entry;
+ int stop_flags;
+
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
+ q->nr_sorted--;
+
+ boundary = q->end_sector;
+ stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+ list_for_each_prev(entry, &q->queue_head) {
+ struct request *pos = list_entry_rq(entry);
+
+ if (blk_discard_rq(rq) != blk_discard_rq(pos))
+ break;
+ if (rq_data_dir(rq) != rq_data_dir(pos))
+ break;
+ if (pos->cmd_flags & stop_flags)
+ break;
+ if (rq->sector >= boundary) {
+ if (pos->sector < boundary)
+ continue;
+ } else {
+ if (pos->sector >= boundary)
+ break;
+ }
+ if (rq->sector >= pos->sector)
+ break;
+ }
+
+ list_add(&rq->queuelist, entry);
+}
+EXPORT_SYMBOL(elv_dispatch_sort);
+
+/*
+ * Insert rq into dispatch queue of q. Queue lock must be held on
+ * entry. rq is added to the back of the dispatch queue. To be used by
+ * specific elevators.
+ */
+void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
+{
+ if (q->last_merge == rq)
+ q->last_merge = NULL;
+
+ elv_rqhash_del(q, rq);
+
+ q->nr_sorted--;
+
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ list_add_tail(&rq->queuelist, &q->queue_head);
+}
+EXPORT_SYMBOL(elv_dispatch_add_tail);
+
+int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
+{
+ struct elevator_queue *e = q->elevator;
+ struct request *__rq;
+ int ret;
+
+ /*
+ * First try one-hit cache.
+ */
+ if (q->last_merge) {
+ ret = elv_try_merge(q->last_merge, bio);
+ if (ret != ELEVATOR_NO_MERGE) {
+ *req = q->last_merge;
+ return ret;
+ }
+ }
+
+ if (blk_queue_nomerges(q))
+ return ELEVATOR_NO_MERGE;
+
+ /*
+ * See if our hash lookup can find a potential backmerge.
+ */
+ __rq = elv_rqhash_find(q, bio->bi_sector);
+ if (__rq && elv_rq_merge_ok(__rq, bio)) {
+ *req = __rq;
+ return ELEVATOR_BACK_MERGE;
+ }
+
+ if (e->ops->elevator_merge_fn)
+ return e->ops->elevator_merge_fn(q, req, bio);
+
+ return ELEVATOR_NO_MERGE;
+}
+
+void elv_merged_request(struct request_queue *q, struct request *rq, int type)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_merged_fn)
+ e->ops->elevator_merged_fn(q, rq, type);
+
+ if (type == ELEVATOR_BACK_MERGE)
+ elv_rqhash_reposition(q, rq);
+
+ q->last_merge = rq;
+}
+
+void elv_merge_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_merge_req_fn)
+ e->ops->elevator_merge_req_fn(q, rq, next);
+
+ elv_rqhash_reposition(q, rq);
+ elv_rqhash_del(q, next);
+
+ q->nr_sorted--;
+ q->last_merge = rq;
+}
+
+void elv_requeue_request(struct request_queue *q, struct request *rq)
+{
+ /*
+ * it already went through dequeue, we need to decrement the
+ * in_flight count again
+ */
+ if (blk_account_rq(rq)) {
+ q->in_flight--;
+ if (blk_sorted_rq(rq))
+ elv_deactivate_rq(q, rq);
+ }
+
+ rq->cmd_flags &= ~REQ_STARTED;
+
+ elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
+}
+
+static void elv_drain_elevator(struct request_queue *q)
+{
+ static int printed;
+ while (q->elevator->ops->elevator_dispatch_fn(q, 1))
+ ;
+ if (q->nr_sorted == 0)
+ return;
+ if (printed++ < 10) {
+ printk(KERN_ERR "%s: forced dispatching is broken "
+ "(nr_sorted=%u), please report this\n",
+ q->elevator->elevator_type->elevator_name, q->nr_sorted);
+ }
+}
+
+void elv_insert(struct request_queue *q, struct request *rq, int where)
+{
+ struct list_head *pos;
+ unsigned ordseq;
+ int unplug_it = 1;
+
+ trace_block_rq_insert(q, rq);
+
+ rq->q = q;
+
+ switch (where) {
+ case ELEVATOR_INSERT_FRONT:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
+
+ case ELEVATOR_INSERT_BACK:
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ elv_drain_elevator(q);
+ list_add_tail(&rq->queuelist, &q->queue_head);
+ /*
+ * We kick the queue here for the following reasons.
+ * - The elevator might have returned NULL previously
+ * to delay requests and returned them now. As the
+ * queue wasn't empty before this request, ll_rw_blk
+ * won't run the queue on return, resulting in hang.
+ * - Usually, back inserted requests won't be merged
+ * with anything. There's no point in delaying queue
+ * processing.
+ */
+ blk_remove_plug(q);
+ blk_start_queueing(q);
+ break;
+
+ case ELEVATOR_INSERT_SORT:
+ BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
+ rq->cmd_flags |= REQ_SORTED;
+ q->nr_sorted++;
+ if (rq_mergeable(rq)) {
+ elv_rqhash_add(q, rq);
+ if (!q->last_merge)
+ q->last_merge = rq;
+ }
+
+ /*
+ * Some ioscheds (cfq) run q->request_fn directly, so
+ * rq cannot be accessed after calling
+ * elevator_add_req_fn.
+ */
+ q->elevator->ops->elevator_add_req_fn(q, rq);
+ break;
+
+ case ELEVATOR_INSERT_REQUEUE:
+ /*
+ * If ordered flush isn't in progress, we do front
+ * insertion; otherwise, requests should be requeued
+ * in ordseq order.
+ */
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+
+ /*
+ * Most requeues happen because of a busy condition,
+ * don't force unplug of the queue for that case.
+ */
+ unplug_it = 0;
+
+ if (q->ordseq == 0) {
+ list_add(&rq->queuelist, &q->queue_head);
+ break;
+ }
+
+ ordseq = blk_ordered_req_seq(rq);
+
+ list_for_each(pos, &q->queue_head) {
+ struct request *pos_rq = list_entry_rq(pos);
+ if (ordseq <= blk_ordered_req_seq(pos_rq))
+ break;
+ }
+
+ list_add_tail(&rq->queuelist, pos);
+ break;
+
+ default:
+ printk(KERN_ERR "%s: bad insertion point %d\n",
+ __func__, where);
+ BUG();
+ }
+
+ if (unplug_it && blk_queue_plugged(q)) {
+ int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+ - q->in_flight;
+
+ if (nrq >= q->unplug_thresh)
+ __generic_unplug_device(q);
+ }
+}
+
+void __elv_add_request(struct request_queue *q, struct request *rq, int where,
+ int plug)
+{
+ if (q->ordcolor)
+ rq->cmd_flags |= REQ_ORDERED_COLOR;
+
+ if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+ /*
+ * toggle ordered color
+ */
+ if (blk_barrier_rq(rq))
+ q->ordcolor ^= 1;
+
+ /*
+ * barriers implicitly indicate back insertion
+ */
+ if (where == ELEVATOR_INSERT_SORT)
+ where = ELEVATOR_INSERT_BACK;
+
+ /*
+ * this request is a scheduling boundary, update
+ * end_sector
+ */
+ if (blk_fs_request(rq) || blk_discard_rq(rq)) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = rq;
+ }
+ } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
+ where == ELEVATOR_INSERT_SORT)
+ where = ELEVATOR_INSERT_BACK;
+
+ if (plug)
+ blk_plug_device(q);
+
+ elv_insert(q, rq, where);
+}
+EXPORT_SYMBOL(__elv_add_request);
+
+void elv_add_request(struct request_queue *q, struct request *rq, int where,
+ int plug)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __elv_add_request(q, rq, where, plug);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(elv_add_request);
+
+static inline struct request *__elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (1) {
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ if (blk_do_ordered(q, &rq))
+ return rq;
+ }
+
+ if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
+ return NULL;
+ }
+}
+
+struct request *elv_next_request(struct request_queue *q)
+{
+ struct request *rq;
+ int ret;
+
+ while ((rq = __elv_next_request(q)) != NULL) {
+ if (!(rq->cmd_flags & REQ_STARTED)) {
+ /*
+ * This is the first time the device driver
+ * sees this request (possibly after
+ * requeueing). Notify IO scheduler.
+ */
+ if (blk_sorted_rq(rq))
+ elv_activate_rq(q, rq);
+
+ /*
+ * just mark it as started even if we don't start
+ * it; a request that has been delayed should
+ * not be passed by new incoming requests
+ */
+ rq->cmd_flags |= REQ_STARTED;
+ trace_block_rq_issue(q, rq);
+ }
+
+ if (!q->boundary_rq || q->boundary_rq == rq) {
+ q->end_sector = rq_end_sector(rq);
+ q->boundary_rq = NULL;
+ }
+
+ if (rq->cmd_flags & REQ_DONTPREP)
+ break;
+
+ if (q->dma_drain_size && rq->data_len) {
+ /*
+ * make sure space for the drain appears. we
+ * know we can do this because max_hw_segments
+ * has been adjusted to be one fewer than the
+ * device can handle
+ */
+ rq->nr_phys_segments++;
+ }
+
+ if (!q->prep_rq_fn)
+ break;
+
+ ret = q->prep_rq_fn(q, rq);
+ if (ret == BLKPREP_OK) {
+ break;
+ } else if (ret == BLKPREP_DEFER) {
+ /*
+ * the request may have been (partially) prepped.
+ * we need to keep this request in the front to
+ * avoid resource deadlock. REQ_STARTED will
+ * prevent other fs requests from passing this one.
+ */
+ if (q->dma_drain_size && rq->data_len &&
+ !(rq->cmd_flags & REQ_DONTPREP)) {
+ /*
+ * remove the space for the drain we added
+ * so that we don't add it again
+ */
+ --rq->nr_phys_segments;
+ }
+
+ rq = NULL;
+ break;
+ } else if (ret == BLKPREP_KILL) {
+ rq->cmd_flags |= REQ_QUIET;
+ __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ } else {
+ printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
+ break;
+ }
+ }
+
+ return rq;
+}
+EXPORT_SYMBOL(elv_next_request);
+
+void elv_dequeue_request(struct request_queue *q, struct request *rq)
+{
+ BUG_ON(list_empty(&rq->queuelist));
+ BUG_ON(ELV_ON_HASH(rq));
+
+ list_del_init(&rq->queuelist);
+
+ /*
+ * the time frame between a request being removed from the lists
+ * and when it is freed is accounted as io that is in progress at
+ * the driver side.
+ */
+ if (blk_account_rq(rq))
+ q->in_flight++;
+}
+
+int elv_queue_empty(struct request_queue *q)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (!list_empty(&q->queue_head))
+ return 0;
+
+ if (e->ops->elevator_queue_empty_fn)
+ return e->ops->elevator_queue_empty_fn(q);
+
+ return 1;
+}
+EXPORT_SYMBOL(elv_queue_empty);
+
+struct request *elv_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_latter_req_fn)
+ return e->ops->elevator_latter_req_fn(q, rq);
+ return NULL;
+}
+
+struct request *elv_former_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_former_req_fn)
+ return e->ops->elevator_former_req_fn(q, rq);
+ return NULL;
+}
+
+int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_set_req_fn)
+ return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
+
+ rq->elevator_private = NULL;
+ return 0;
+}
+
+void elv_put_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_put_req_fn)
+ e->ops->elevator_put_req_fn(rq);
+}
+
+int elv_may_queue(struct request_queue *q, int rw)
+{
+ struct elevator_queue *e = q->elevator;
+
+ if (e->ops->elevator_may_queue_fn)
+ return e->ops->elevator_may_queue_fn(q, rw);
+
+ return ELV_MQUEUE_MAY;
+}
+
+void elv_abort_queue(struct request_queue *q)
+{
+ struct request *rq;
+
+ while (!list_empty(&q->queue_head)) {
+ rq = list_entry_rq(q->queue_head.next);
+ rq->cmd_flags |= REQ_QUIET;
+ trace_block_rq_abort(q, rq);
+ __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+ }
+}
+EXPORT_SYMBOL(elv_abort_queue);
+
+void elv_completed_request(struct request_queue *q, struct request *rq)
+{
+ struct elevator_queue *e = q->elevator;
+
+ /*
+ * request is released from the driver, io must be done
+ */
+ if (blk_account_rq(rq)) {
+ q->in_flight--;
+ if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+ e->ops->elevator_completed_req_fn(q, rq);
+ }
+
+ /*
+ * Check if the queue is waiting for fs requests to be
+ * drained for flush sequence.
+ */
+ if (unlikely(q->ordseq)) {
+ struct request *next = NULL;
+
+ if (!list_empty(&q->queue_head))
+ next = list_entry_rq(q->queue_head.next);
+
+ if (!q->in_flight &&
+ blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
+ (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
+ blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
+ blk_start_queueing(q);
+ }
+ }
+}
+
+#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
+
+static ssize_t
+elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+ struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
+ ssize_t error;
+
+ if (!entry->show)
+ return -EIO;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ mutex_lock(&e->sysfs_lock);
+ error = e->ops ? entry->show(e, page) : -ENOENT;
+ mutex_unlock(&e->sysfs_lock);
+ return error;
+}
+
+static ssize_t
+elv_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *page, size_t length)
+{
+ struct elv_fs_entry *entry = to_elv(attr);
+ struct elevator_queue *e;
+ ssize_t error;
+
+ if (!entry->store)
+ return -EIO;
+
+ e = container_of(kobj, struct elevator_queue, kobj);
+ mutex_lock(&e->sysfs_lock);
+ error = e->ops ? entry->store(e, page, length) : -ENOENT;
+ mutex_unlock(&e->sysfs_lock);
+ return error;
+}
+
+static struct sysfs_ops elv_sysfs_ops = {
+ .show = elv_attr_show,
+ .store = elv_attr_store,
+};
+
+static struct kobj_type elv_ktype = {
+ .sysfs_ops = &elv_sysfs_ops,
+ .release = elevator_release,
+};
+
+int elv_register_queue(struct request_queue *q)
+{
+ struct elevator_queue *e = q->elevator;
+ int error;
+
+ error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
+ if (!error) {
+ struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
+ if (attr) {
+ while (attr->attr.name) {
+ if (sysfs_create_file(&e->kobj, &attr->attr))
+ break;
+ attr++;
+ }
+ }
+ kobject_uevent(&e->kobj, KOBJ_ADD);
+ }
+ return error;
+}
+
+static void __elv_unregister_queue(struct elevator_queue *e)
+{
+ kobject_uevent(&e->kobj, KOBJ_REMOVE);
+ kobject_del(&e->kobj);
+}
+
+void elv_unregister_queue(struct request_queue *q)
+{
+ if (q)
+ __elv_unregister_queue(q->elevator);
+}
+
+void elv_register(struct elevator_type *e)
+{
+ char *def = "";
+
+ spin_lock(&elv_list_lock);
+ BUG_ON(elevator_find(e->elevator_name));
+ list_add_tail(&e->list, &elv_list);
+ spin_unlock(&elv_list_lock);
+
+ if (!strcmp(e->elevator_name, chosen_elevator) ||
+ (!*chosen_elevator &&
+ !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
+ def = " (default)";
+
+ printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
+ def);
+}
+EXPORT_SYMBOL_GPL(elv_register);
+
+void elv_unregister(struct elevator_type *e)
+{
+ struct task_struct *g, *p;
+
+ /*
+ * Iterate over every thread in the system and trim its io context.
+ */
+ if (e->ops.trim) {
+ read_lock(&tasklist_lock);
+ do_each_thread(g, p) {
+ task_lock(p);
+ if (p->io_context)
+ e->ops.trim(p->io_context);
+ task_unlock(p);
+ } while_each_thread(g, p);
+ read_unlock(&tasklist_lock);
+ }
+
+ spin_lock(&elv_list_lock);
+ list_del_init(&e->list);
+ spin_unlock(&elv_list_lock);
+}
+EXPORT_SYMBOL_GPL(elv_unregister);
+
+/*
+ * switch to new_e io scheduler. be careful not to introduce deadlocks -
+ * we don't free the old io scheduler before we have allocated what we
+ * need for the new one. this way we have a chance of going back to the
+ * old one if the new one fails init for some reason.
+ */
+static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
+{
+ struct elevator_queue *old_elevator, *e;
+ void *data;
+
+ /*
+ * Allocate new elevator
+ */
+ e = elevator_alloc(q, new_e);
+ if (!e)
+ return 0;
+
+ data = elevator_init_queue(q, e);
+ if (!data) {
+ kobject_put(&e->kobj);
+ return 0;
+ }
+
+ /*
+ * Turn on BYPASS and drain all requests w/ elevator private data
+ */
+ spin_lock_irq(q->queue_lock);
+
+ queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
+
+ elv_drain_elevator(q);
+
+ while (q->rq.elvpriv) {
+ blk_start_queueing(q);
+ spin_unlock_irq(q->queue_lock);
+ msleep(10);
+ spin_lock_irq(q->queue_lock);
+ elv_drain_elevator(q);
+ }
+
+ /*
+ * Remember old elevator.
+ */
+ old_elevator = q->elevator;
+
+ /*
+ * attach and start new elevator
+ */
+ elevator_attach(q, e, data);
+
+ spin_unlock_irq(q->queue_lock);
+
+ __elv_unregister_queue(old_elevator);
+
+ if (elv_register_queue(q))
+ goto fail_register;
+
+ /*
+ * finally exit old elevator and turn off BYPASS.
+ */
+ elevator_exit(old_elevator);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+
+ blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
+
+ return 1;
+
+fail_register:
+ /*
+ * switch failed, exit the new io scheduler and reattach the old
+ * one again (along with re-adding the sysfs dir)
+ */
+ elevator_exit(e);
+ q->elevator = old_elevator;
+ elv_register_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+ spin_unlock_irq(q->queue_lock);
+
+ return 0;
+}
+
+ssize_t elv_iosched_store(struct request_queue *q, const char *name,
+ size_t count)
+{
+ char elevator_name[ELV_NAME_MAX];
+ struct elevator_type *e;
+
+ strlcpy(elevator_name, name, sizeof(elevator_name));
+ strstrip(elevator_name);
+
+ e = elevator_get(elevator_name);
+ if (!e) {
+ printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
+ elevator_put(e);
+ return count;
+ }
+
+ if (!elevator_switch(q, e))
+ printk(KERN_ERR "elevator: switch to %s failed\n",
+ elevator_name);
+ return count;
+}
+
+ssize_t elv_iosched_show(struct request_queue *q, char *name)
+{
+ struct elevator_queue *e = q->elevator;
+ struct elevator_type *elv = e->elevator_type;
+ struct elevator_type *__e;
+ int len = 0;
+
+ spin_lock(&elv_list_lock);
+ list_for_each_entry(__e, &elv_list, list) {
+ if (!strcmp(elv->elevator_name, __e->elevator_name))
+ len += sprintf(name+len, "[%s] ", elv->elevator_name);
+ else
+ len += sprintf(name+len, "%s ", __e->elevator_name);
+ }
+ spin_unlock(&elv_list_lock);
+
+ len += sprintf(len+name, "\n");
+ return len;
+}
+
+struct request *elv_rb_former_request(struct request_queue *q,
+ struct request *rq)
+{
+ struct rb_node *rbprev = rb_prev(&rq->rb_node);
+
+ if (rbprev)
+ return rb_entry_rq(rbprev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_former_request);
+
+struct request *elv_rb_latter_request(struct request_queue *q,
+ struct request *rq)
+{
+ struct rb_node *rbnext = rb_next(&rq->rb_node);
+
+ if (rbnext)
+ return rb_entry_rq(rbnext);
+
+ return NULL;
+}
+EXPORT_SYMBOL(elv_rb_latter_request);
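
The elv_iosched_show()/elv_iosched_store() pair above backs the per-queue scheduler attribute in sysfs: reading it lists the registered elevators with the active one in brackets, and writing an elevator name goes through elevator_switch(). A minimal userspace sketch, assuming a disk named "sda", a registered "deadline" elevator and root privileges for the write (error handling kept to a minimum):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/scheduler";
	char line[256];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("registered: %s", line);	/* e.g. "noop [cfq] deadline" */
	fclose(f);

	f = fopen(path, "w");
	if (!f) {
		perror("open for switching (root needed?)");
		return 1;
	}
	fputs("deadline\n", f);	/* handled by elv_iosched_store() */
	fclose(f);
	return 0;
}
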
diff --git a/libdde-linux26/contrib/block/ioctl.c b/libdde-linux26/contrib/block/ioctl.c
new file mode 100644
index 00000000..0f22e629
--- /dev/null
+++ b/libdde-linux26/contrib/block/ioctl.c
@@ -0,0 +1,372 @@
+#include <linux/capability.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/hdreg.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <linux/blktrace_api.h>
+#include <asm/uaccess.h>
+
+static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+{
+ struct block_device *bdevp;
+ struct gendisk *disk;
+ struct hd_struct *part;
+ struct blkpg_ioctl_arg a;
+ struct blkpg_partition p;
+ struct disk_part_iter piter;
+ long long start, length;
+ int partno;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+ return -EFAULT;
+ if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+ return -EFAULT;
+ disk = bdev->bd_disk;
+ if (bdev != bdev->bd_contains)
+ return -EINVAL;
+ partno = p.pno;
+ if (partno <= 0)
+ return -EINVAL;
+ switch (a.op) {
+ case BLKPG_ADD_PARTITION:
+ start = p.start >> 9;
+ length = p.length >> 9;
+ /* check for fit in a hd_struct */
+ if (sizeof(sector_t) == sizeof(long) &&
+ sizeof(long long) > sizeof(long)) {
+ long pstart = start, plength = length;
+ if (pstart != start || plength != length
+ || pstart < 0 || plength < 0)
+ return -EINVAL;
+ }
+
+ mutex_lock(&bdev->bd_mutex);
+
+ /* overlap? */
+ disk_part_iter_init(&piter, disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter))) {
+ if (!(start + length <= part->start_sect ||
+ start >= part->start_sect + part->nr_sects)) {
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdev->bd_mutex);
+ return -EBUSY;
+ }
+ }
+ disk_part_iter_exit(&piter);
+
+ /* all seems OK */
+ part = add_partition(disk, partno, start, length,
+ ADDPART_FLAG_NONE);
+ mutex_unlock(&bdev->bd_mutex);
+ return IS_ERR(part) ? PTR_ERR(part) : 0;
+ case BLKPG_DEL_PARTITION:
+ part = disk_get_part(disk, partno);
+ if (!part)
+ return -ENXIO;
+
+ bdevp = bdget(part_devt(part));
+ disk_put_part(part);
+ if (!bdevp)
+ return -ENOMEM;
+
+ mutex_lock(&bdevp->bd_mutex);
+ if (bdevp->bd_openers) {
+ mutex_unlock(&bdevp->bd_mutex);
+ bdput(bdevp);
+ return -EBUSY;
+ }
+ /* all seems OK */
+ fsync_bdev(bdevp);
+ invalidate_bdev(bdevp);
+
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ delete_partition(disk, partno);
+ mutex_unlock(&bdev->bd_mutex);
+ mutex_unlock(&bdevp->bd_mutex);
+ bdput(bdevp);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int blkdev_reread_part(struct block_device *bdev)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int res;
+
+ if (!disk_partitionable(disk) || bdev != bdev->bd_contains)
+ return -EINVAL;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!mutex_trylock(&bdev->bd_mutex))
+ return -EBUSY;
+ res = rescan_partitions(disk, bdev);
+ mutex_unlock(&bdev->bd_mutex);
+ return res;
+}
+
+static void blk_ioc_discard_endio(struct bio *bio, int err)
+{
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
+ complete(bio->bi_private);
+}
+
+static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
+ uint64_t len)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+ int ret = 0;
+
+ if (start & 511)
+ return -EINVAL;
+ if (len & 511)
+ return -EINVAL;
+ start >>= 9;
+ len >>= 9;
+
+ if (start + len > (bdev->bd_inode->i_size >> 9))
+ return -EINVAL;
+
+ if (!q->prepare_discard_fn)
+ return -EOPNOTSUPP;
+
+ while (len && !ret) {
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_end_io = blk_ioc_discard_endio;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &wait;
+ bio->bi_sector = start;
+
+ if (len > q->max_hw_sectors) {
+ bio->bi_size = q->max_hw_sectors << 9;
+ len -= q->max_hw_sectors;
+ start += q->max_hw_sectors;
+ } else {
+ bio->bi_size = len << 9;
+ len = 0;
+ }
+ submit_bio(DISCARD_NOBARRIER, bio);
+
+ wait_for_completion(&wait);
+
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
+ ret = -EIO;
+ bio_put(bio);
+ }
+ return ret;
+}
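
/*
 * Illustrative userspace sketch (not part of this file) of driving
 * blk_ioctl_discard() above through the BLKDISCARD ioctl: the argument is a
 * uint64_t[2] of {start, length} in bytes, both 512-byte aligned, and the
 * call fails with EOPNOTSUPP when the queue has no prepare_discard_fn.
 * The device path is a placeholder and the range is arbitrary; note that a
 * successful discard destroys the data in that range.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(int argc, char **argv)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* first MiB, for illustration */
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/sdX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, range))
		perror("BLKDISCARD");
	close(fd);
	return 0;
}
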
+
+static int put_ushort(unsigned long arg, unsigned short val)
+{
+ return put_user(val, (unsigned short __user *)arg);
+}
+
+static int put_int(unsigned long arg, int val)
+{
+ return put_user(val, (int __user *)arg);
+}
+
+static int put_long(unsigned long arg, long val)
+{
+ return put_user(val, (long __user *)arg);
+}
+
+static int put_ulong(unsigned long arg, unsigned long val)
+{
+ return put_user(val, (unsigned long __user *)arg);
+}
+
+static int put_u64(unsigned long arg, u64 val)
+{
+ return put_user(val, (u64 __user *)arg);
+}
+
+int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned cmd, unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ int ret;
+
+ if (disk->fops->ioctl)
+ return disk->fops->ioctl(bdev, mode, cmd, arg);
+
+ if (disk->fops->locked_ioctl) {
+ lock_kernel();
+ ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg);
+ unlock_kernel();
+ return ret;
+ }
+
+ return -ENOTTY;
+}
+/*
+ * For the record: _GPL here is only because somebody decided to slap it
+ * on the previous export. Sheer idiocy, since it wasn't copyrightable
+ * at all and could be open-coded without any exports by anybody who cares.
+ */
+EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
+
+/*
+ * always keep this in sync with compat_blkdev_ioctl() and
+ * compat_blkdev_locked_ioctl()
+ */
+int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
+ unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct backing_dev_info *bdi;
+ loff_t size;
+ int ret, n;
+
+	switch (cmd) {
+ case BLKFLSBUF:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ /* -EINVAL to handle old uncorrected drivers */
+ if (ret != -EINVAL && ret != -ENOTTY)
+ return ret;
+
+ lock_kernel();
+ fsync_bdev(bdev);
+ invalidate_bdev(bdev);
+ unlock_kernel();
+ return 0;
+
+ case BLKROSET:
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ /* -EINVAL to handle old uncorrected drivers */
+ if (ret != -EINVAL && ret != -ENOTTY)
+ return ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (get_user(n, (int __user *)(arg)))
+ return -EFAULT;
+ lock_kernel();
+ set_device_ro(bdev, n);
+ unlock_kernel();
+ return 0;
+
+ case BLKDISCARD: {
+ uint64_t range[2];
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ return blk_ioctl_discard(bdev, range[0], range[1]);
+ }
+
+ case HDIO_GETGEO: {
+ struct hd_geometry geo;
+
+ if (!arg)
+ return -EINVAL;
+ if (!disk->fops->getgeo)
+ return -ENOTTY;
+
+		/*
+		 * We need to set the startsect first; the driver may
+		 * want to override it.
+		 */
+ geo.start = get_start_sect(bdev);
+ ret = disk->fops->getgeo(bdev, &geo);
+ if (ret)
+ return ret;
+ if (copy_to_user((struct hd_geometry __user *)arg, &geo,
+ sizeof(geo)))
+ return -EFAULT;
+ return 0;
+ }
+ case BLKRAGET:
+ case BLKFRAGET:
+ if (!arg)
+ return -EINVAL;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ case BLKROGET:
+ return put_int(arg, bdev_read_only(bdev) != 0);
+ case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+ return put_int(arg, block_size(bdev));
+ case BLKSSZGET: /* get block device hardware sector size */
+ return put_int(arg, bdev_hardsect_size(bdev));
+ case BLKSECTGET:
+ return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+ case BLKRASET:
+ case BLKFRASET:
+		if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ bdi = blk_get_backing_dev_info(bdev);
+ if (bdi == NULL)
+ return -ENOTTY;
+ bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ return 0;
+ case BLKBSZSET:
+ /* set the logical block size */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!arg)
+ return -EINVAL;
+ if (get_user(n, (int __user *) arg))
+ return -EFAULT;
+ if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
+ return -EBUSY;
+ ret = set_blocksize(bdev, n);
+ if (!(mode & FMODE_EXCL))
+ bd_release(bdev);
+ return ret;
+ case BLKPG:
+ lock_kernel();
+ ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
+ unlock_kernel();
+ break;
+ case BLKRRPART:
+ lock_kernel();
+ ret = blkdev_reread_part(bdev);
+ unlock_kernel();
+ break;
+ case BLKGETSIZE:
+ size = bdev->bd_inode->i_size;
+ if ((size >> 9) > ~0UL)
+ return -EFBIG;
+ return put_ulong(arg, size >> 9);
+ case BLKGETSIZE64:
+ return put_u64(arg, bdev->bd_inode->i_size);
+ case BLKTRACESTART:
+ case BLKTRACESTOP:
+ case BLKTRACESETUP:
+ case BLKTRACETEARDOWN:
+ lock_kernel();
+ ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg);
+ unlock_kernel();
+ break;
+ default:
+ ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_ioctl);
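+/*
+ * Usage sketch for two of the query ioctls handled above (userspace, not
+ * built with this file; the device path is an arbitrary example).
+ * BLKGETSIZE64 reports the device size in bytes, BLKSSZGET the hardware
+ * sector size.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/fs.h>
+
+int main(void)
+{
+	uint64_t bytes;
+	int ssz;
+	int fd = open("/dev/sda", O_RDONLY);
+
+	if (fd < 0)
+		return 1;
+	if (ioctl(fd, BLKGETSIZE64, &bytes) == 0 &&
+	    ioctl(fd, BLKSSZGET, &ssz) == 0)
+		printf("%llu bytes, %d byte sectors\n",
+		       (unsigned long long)bytes, ssz);
+	close(fd);
+	return 0;
+}
+#endif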
diff --git a/libdde-linux26/contrib/block/noop-iosched.c b/libdde-linux26/contrib/block/noop-iosched.c
new file mode 100644
index 00000000..075cb108
--- /dev/null
+++ b/libdde-linux26/contrib/block/noop-iosched.c
@@ -0,0 +1,123 @@
+/*
+ * elevator noop
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "local.h"
+
+struct noop_data {
+ struct list_head queue;
+};
+
+static void noop_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ list_del_init(&next->queuelist);
+}
+
+static int noop_dispatch(struct request_queue *q, int force)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (!list_empty(&nd->queue)) {
+ struct request *rq;
+ rq = list_entry(nd->queue.next, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ elv_dispatch_sort(q, rq);
+ return 1;
+ }
+ return 0;
+}
+
+static void noop_add_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ list_add_tail(&rq->queuelist, &nd->queue);
+}
+
+static int noop_queue_empty(struct request_queue *q)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ return list_empty(&nd->queue);
+}
+
+static struct request *
+noop_former_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (rq->queuelist.prev == &nd->queue)
+ return NULL;
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+noop_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct noop_data *nd = q->elevator->elevator_data;
+
+ if (rq->queuelist.next == &nd->queue)
+ return NULL;
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *noop_init_queue(struct request_queue *q)
+{
+ struct noop_data *nd;
+
+ nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
+ if (!nd)
+ return NULL;
+ INIT_LIST_HEAD(&nd->queue);
+ return nd;
+}
+
+static void noop_exit_queue(struct elevator_queue *e)
+{
+ struct noop_data *nd = e->elevator_data;
+
+ BUG_ON(!list_empty(&nd->queue));
+ kfree(nd);
+}
+
+static struct elevator_type elevator_noop = {
+ .ops = {
+ .elevator_merge_req_fn = noop_merged_requests,
+ .elevator_dispatch_fn = noop_dispatch,
+ .elevator_add_req_fn = noop_add_request,
+ .elevator_queue_empty_fn = noop_queue_empty,
+ .elevator_former_req_fn = noop_former_request,
+ .elevator_latter_req_fn = noop_latter_request,
+ .elevator_init_fn = noop_init_queue,
+ .elevator_exit_fn = noop_exit_queue,
+ },
+ .elevator_name = "noop",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init noop_init(void)
+{
+	DEBUG_MSG("registering noop I/O scheduler");
+ elv_register(&elevator_noop);
+
+ return 0;
+}
+
+static void __exit noop_exit(void)
+{
+ elv_unregister(&elevator_noop);
+}
+
+subsys_initcall(noop_init);
+module_exit(noop_exit);
+
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("No-op IO scheduler");
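+/*
+ * Selection sketch: elv_register() above makes "noop" selectable per queue,
+ * e.g. by writing the elevator_name to the queue's sysfs "scheduler"
+ * attribute (userspace, not built with this file; the device name is an
+ * example).
+ */
+#if 0
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+	int fd = open("/sys/block/sda/queue/scheduler", O_WRONLY);
+
+	if (fd < 0)
+		return 1;
+	if (write(fd, "noop", strlen("noop")) < 0) {
+		close(fd);
+		return 1;
+	}
+	close(fd);
+	return 0;
+}
+#endif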
diff --git a/libdde-linux26/contrib/block/scsi_ioctl.c b/libdde-linux26/contrib/block/scsi_ioctl.c
new file mode 100644
index 00000000..ee9c67d7
--- /dev/null
+++ b/libdde-linux26/contrib/block/scsi_ioctl.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/capability.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/slab.h>
+#include <linux/times.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+
+/* Command group 3 is reserved and should never be used. */
+const unsigned char scsi_command_size_tbl[8] =
+{
+ 6, 10, 10, 12,
+ 16, 12, 10, 10
+};
+EXPORT_SYMBOL(scsi_command_size_tbl);
+
+#include <scsi/sg.h>
+
+static int sg_get_version(int __user *p)
+{
+ static const int sg_version_num = 30527;
+ return put_user(sg_version_num, p);
+}
+
+static int scsi_get_idlun(struct request_queue *q, int __user *p)
+{
+ return put_user(0, p);
+}
+
+static int scsi_get_bus(struct request_queue *q, int __user *p)
+{
+ return put_user(0, p);
+}
+
+static int sg_get_timeout(struct request_queue *q)
+{
+ return jiffies_to_clock_t(q->sg_timeout);
+}
+
+static int sg_set_timeout(struct request_queue *q, int __user *p)
+{
+ int timeout, err = get_user(timeout, p);
+
+ if (!err)
+ q->sg_timeout = clock_t_to_jiffies(timeout);
+
+ return err;
+}
+
+static int sg_get_reserved_size(struct request_queue *q, int __user *p)
+{
+ unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+
+ return put_user(val, p);
+}
+
+static int sg_set_reserved_size(struct request_queue *q, int __user *p)
+{
+ int size, err = get_user(size, p);
+
+ if (err)
+ return err;
+
+ if (size < 0)
+ return -EINVAL;
+ if (size > (q->max_sectors << 9))
+ size = q->max_sectors << 9;
+
+ q->sg_reserved_size = size;
+ return 0;
+}
+
+/*
+ * will always report that we are ATAPI, even for a real SCSI drive; it is
+ * not clear this is worth doing anything about (why would you care?)
+ */
+static int sg_emulated_host(struct request_queue *q, int __user *p)
+{
+ return put_user(1, p);
+}
+
+void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+{
+ /* Basic read-only commands */
+ __set_bit(TEST_UNIT_READY, filter->read_ok);
+ __set_bit(REQUEST_SENSE, filter->read_ok);
+ __set_bit(READ_6, filter->read_ok);
+ __set_bit(READ_10, filter->read_ok);
+ __set_bit(READ_12, filter->read_ok);
+ __set_bit(READ_16, filter->read_ok);
+ __set_bit(READ_BUFFER, filter->read_ok);
+ __set_bit(READ_DEFECT_DATA, filter->read_ok);
+ __set_bit(READ_CAPACITY, filter->read_ok);
+ __set_bit(READ_LONG, filter->read_ok);
+ __set_bit(INQUIRY, filter->read_ok);
+ __set_bit(MODE_SENSE, filter->read_ok);
+ __set_bit(MODE_SENSE_10, filter->read_ok);
+ __set_bit(LOG_SENSE, filter->read_ok);
+ __set_bit(START_STOP, filter->read_ok);
+ __set_bit(GPCMD_VERIFY_10, filter->read_ok);
+ __set_bit(VERIFY_16, filter->read_ok);
+ __set_bit(REPORT_LUNS, filter->read_ok);
+ __set_bit(SERVICE_ACTION_IN, filter->read_ok);
+ __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok);
+ __set_bit(MAINTENANCE_IN, filter->read_ok);
+ __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok);
+
+ /* Audio CD commands */
+ __set_bit(GPCMD_PLAY_CD, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok);
+ __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok);
+ __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok);
+
+ /* CD/DVD data reading */
+ __set_bit(GPCMD_READ_CD, filter->read_ok);
+ __set_bit(GPCMD_READ_CD_MSF, filter->read_ok);
+ __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok);
+ __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok);
+ __set_bit(GPCMD_READ_HEADER, filter->read_ok);
+ __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok);
+ __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok);
+ __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok);
+ __set_bit(GPCMD_REPORT_KEY, filter->read_ok);
+ __set_bit(GPCMD_SCAN, filter->read_ok);
+ __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok);
+ __set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok);
+ __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok);
+ __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok);
+ __set_bit(GPCMD_SEEK, filter->read_ok);
+ __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok);
+
+ /* Basic writing commands */
+ __set_bit(WRITE_6, filter->write_ok);
+ __set_bit(WRITE_10, filter->write_ok);
+ __set_bit(WRITE_VERIFY, filter->write_ok);
+ __set_bit(WRITE_12, filter->write_ok);
+ __set_bit(WRITE_VERIFY_12, filter->write_ok);
+ __set_bit(WRITE_16, filter->write_ok);
+ __set_bit(WRITE_LONG, filter->write_ok);
+ __set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(ERASE, filter->write_ok);
+ __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ __set_bit(MODE_SELECT, filter->write_ok);
+ __set_bit(LOG_SELECT, filter->write_ok);
+ __set_bit(GPCMD_BLANK, filter->write_ok);
+ __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok);
+ __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok);
+ __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok);
+ __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok);
+ __set_bit(GPCMD_SEND_EVENT, filter->write_ok);
+ __set_bit(GPCMD_SEND_KEY, filter->write_ok);
+ __set_bit(GPCMD_SEND_OPC, filter->write_ok);
+ __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok);
+ __set_bit(GPCMD_SET_SPEED, filter->write_ok);
+ __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok);
+ __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok);
+ __set_bit(GPCMD_SET_STREAMING, filter->write_ok);
+ __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok);
+}
+EXPORT_SYMBOL_GPL(blk_set_cmd_filter_defaults);
+
+static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
+ return -EFAULT;
+ if (blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE))
+ return -EPERM;
+
+ /*
+ * fill in request structure
+ */
+ rq->cmd_len = hdr->cmd_len;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ rq->timeout = msecs_to_jiffies(hdr->timeout);
+ if (!rq->timeout)
+ rq->timeout = q->sg_timeout;
+ if (!rq->timeout)
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+ rq->timeout = BLK_MIN_SG_TIMEOUT;
+
+ return 0;
+}
+
+/*
+ * Unmap a request that was previously mapped to this sg_io_hdr; handles
+ * both sg and non-sg sg_io_hdr.
+ */
+static int blk_unmap_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr)
+{
+ blk_rq_unmap_user(rq->bio);
+ blk_put_request(rq);
+ return 0;
+}
+
+static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
+ struct bio *bio)
+{
+ int r, ret = 0;
+
+ /*
+ * fill in all the output members
+ */
+ hdr->status = rq->errors & 0xff;
+ hdr->masked_status = status_byte(rq->errors);
+ hdr->msg_status = msg_byte(rq->errors);
+ hdr->host_status = host_byte(rq->errors);
+ hdr->driver_status = driver_byte(rq->errors);
+ hdr->info = 0;
+ if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+ hdr->info |= SG_INFO_CHECK;
+ hdr->resid = rq->data_len;
+ hdr->sb_len_wr = 0;
+
+ if (rq->sense_len && hdr->sbp) {
+ int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+ if (!copy_to_user(hdr->sbp, rq->sense, len))
+ hdr->sb_len_wr = len;
+ else
+ ret = -EFAULT;
+ }
+
+ rq->bio = bio;
+ r = blk_unmap_sghdr_rq(rq, hdr);
+ if (ret)
+ r = ret;
+
+ return r;
+}
+
+static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
+ struct sg_io_hdr *hdr, fmode_t mode)
+{
+ unsigned long start_time;
+ int writing = 0, ret = 0;
+ struct request *rq;
+ char sense[SCSI_SENSE_BUFFERSIZE];
+ struct bio *bio;
+
+ if (hdr->interface_id != 'S')
+ return -EINVAL;
+ if (hdr->cmd_len > BLK_MAX_CDB)
+ return -EINVAL;
+
+ if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+ return -EIO;
+
+ if (hdr->dxfer_len)
+ switch (hdr->dxfer_direction) {
+ default:
+ return -EINVAL;
+ case SG_DXFER_TO_DEV:
+ writing = 1;
+ break;
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ break;
+ }
+
+ rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+ if (!rq)
+ return -ENOMEM;
+
+ if (blk_fill_sghdr_rq(q, rq, hdr, mode)) {
+ blk_put_request(rq);
+ return -EFAULT;
+ }
+
+ if (hdr->iovec_count) {
+ const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+ struct sg_iovec *iov;
+
+ iov = kmalloc(size, GFP_KERNEL);
+ if (!iov) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(iov, hdr->dxferp, size)) {
+ kfree(iov);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ hdr->dxfer_len, GFP_KERNEL);
+ kfree(iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
+ GFP_KERNEL);
+
+ if (ret)
+ goto out;
+
+ bio = rq->bio;
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ rq->retries = 0;
+
+ start_time = jiffies;
+
+	/* Ignore the return value; all information is passed back to the
+	 * caller (if the caller doesn't check it, that is their problem).
+	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
+	 */
+ blk_execute_rq(q, bd_disk, rq, 0);
+
+ hdr->duration = jiffies_to_msecs(jiffies - start_time);
+
+ return blk_complete_sghdr_rq(rq, hdr, bio);
+out:
+ blk_put_request(rq);
+ return ret;
+}
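+/*
+ * Usage sketch for the SG_IO path above (userspace, not built with this
+ * file).  Issues a standard INQUIRY; the allocation length is kept below
+ * 256 so it fits byte 4 of the 6-byte CDB.  The timeout is in milliseconds,
+ * matching the msecs_to_jiffies() conversion in blk_fill_sghdr_rq().
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <scsi/sg.h>
+
+static int do_inquiry(int fd, unsigned char *buf, unsigned char len)
+{
+	unsigned char cdb[6] = { 0x12, 0, 0, 0, len, 0 };	/* INQUIRY */
+	unsigned char sense[32];
+	struct sg_io_hdr hdr;
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.interface_id = 'S';			/* required, see sg_io() */
+	hdr.cmd_len = sizeof(cdb);
+	hdr.cmdp = cdb;
+	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+	hdr.dxferp = buf;
+	hdr.dxfer_len = len;
+	hdr.sbp = sense;
+	hdr.mx_sb_len = sizeof(sense);
+	hdr.timeout = 5000;			/* ms */
+
+	if (ioctl(fd, SG_IO, &hdr) < 0)
+		return -1;
+	return hdr.status;			/* SCSI status byte */
+}
+#endif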
+
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @q: request queue to send scsi commands down
+ * @disk: gendisk to operate on (optional)
+ * @mode: mode of the file the ioctl was submitted through
+ * @sic: userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q. The open @mode is used to perform fine-grained
+ * permission checks: read-only opens may only send down
+ * non-destructive SCSI commands. If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it. A NULL @disk is only
+ * allowed if the caller knows that the low level driver doesn't need it
+ * (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ * - This interface is deprecated - users should use the SG_IO
+ * interface instead, as this is a more flexible approach to
+ * performing SCSI commands on a device.
+ * - The SCSI command length is determined by examining the 1st byte
+ * of the given command. There is no way to override this.
+ * - Data transfers are limited to PAGE_SIZE
+ * - The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ * accommodate the sense buffer when an error occurs.
+ * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ * old code will not be surprised.
+ * - If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ * a negative return and the Unix error code in 'errno'.
+ * If the SCSI command succeeds then 0 is returned.
+ * Positive numbers returned are the compacted SCSI error codes (4
+ * bytes in one int) where the lowest byte is the SCSI status.
+ */
+#define OMAX_SB_LEN 16 /* For backward compatibility */
+int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
+{
+ struct request *rq;
+ int err;
+ unsigned int in_len, out_len, bytes, opcode, cmdlen;
+ char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+
+ if (!sic)
+ return -EINVAL;
+
+ /*
+	 * get the in and out lengths and verify they don't exceed a page worth of data
+ */
+ if (get_user(in_len, &sic->inlen))
+ return -EFAULT;
+ if (get_user(out_len, &sic->outlen))
+ return -EFAULT;
+ if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+ return -EINVAL;
+ if (get_user(opcode, sic->data))
+ return -EFAULT;
+
+ bytes = max(in_len, out_len);
+ if (bytes) {
+		buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER | __GFP_NOWARN);
+		if (!buffer)
+			return -ENOMEM;
+	}
+
+ rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+
+ cmdlen = COMMAND_SIZE(opcode);
+
+ /*
+ * get command and data to send to device, if any
+ */
+ err = -EFAULT;
+ rq->cmd_len = cmdlen;
+ if (copy_from_user(rq->cmd, sic->data, cmdlen))
+ goto error;
+
+ if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
+ goto error;
+
+ err = blk_verify_command(&q->cmd_filter, rq->cmd, mode & FMODE_WRITE);
+ if (err)
+ goto error;
+
+	/* default; possibly overridden later */
+ rq->retries = 5;
+
+ switch (opcode) {
+ case SEND_DIAGNOSTIC:
+ case FORMAT_UNIT:
+ rq->timeout = FORMAT_UNIT_TIMEOUT;
+ rq->retries = 1;
+ break;
+ case START_STOP:
+ rq->timeout = START_STOP_TIMEOUT;
+ break;
+ case MOVE_MEDIUM:
+ rq->timeout = MOVE_MEDIUM_TIMEOUT;
+ break;
+ case READ_ELEMENT_STATUS:
+ rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+ break;
+ case READ_DEFECT_DATA:
+ rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+ rq->retries = 1;
+ break;
+ default:
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ break;
+ }
+
+ if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+ err = DRIVER_ERROR << 24;
+ goto out;
+ }
+
+ memset(sense, 0, sizeof(sense));
+ rq->sense = sense;
+ rq->sense_len = 0;
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+
+ blk_execute_rq(q, disk, rq, 0);
+
+out:
+ err = rq->errors & 0xff; /* only 8 bit SCSI status */
+ if (err) {
+ if (rq->sense_len && rq->sense) {
+ bytes = (OMAX_SB_LEN > rq->sense_len) ?
+ rq->sense_len : OMAX_SB_LEN;
+ if (copy_to_user(sic->data, rq->sense, bytes))
+ err = -EFAULT;
+ }
+ } else {
+ if (copy_to_user(sic->data, buffer, out_len))
+ err = -EFAULT;
+ }
+
+error:
+ kfree(buffer);
+ blk_put_request(rq);
+ return err;
+}
+EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
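+/*
+ * Usage sketch for the deprecated interface documented above (userspace,
+ * not built with this file).  The struct layout mirrors what sg_scsi_ioctl()
+ * reads: inlen, outlen, then a data area holding the CDB on entry and the
+ * output data (or up to OMAX_SB_LEN bytes of sense data) on return.
+ */
+#if 0
+#include <string.h>
+#include <sys/ioctl.h>
+#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_SEND_COMMAND */
+
+static int test_unit_ready(int fd)
+{
+	struct {
+		unsigned int inlen;	/* bytes of data sent after the CDB */
+		unsigned int outlen;	/* bytes of data expected back */
+		unsigned char data[16];	/* CDB in; data / sense out */
+	} sic;
+
+	memset(&sic, 0, sizeof(sic));
+	sic.data[0] = 0x00;		/* TEST UNIT READY, 6-byte CDB */
+
+	/* 0 on success, negative on Unix error, compacted SCSI result otherwise */
+	return ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic);
+}
+#endif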
+
+/* Send basic block requests */
+static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
+ int cmd, int data)
+{
+ struct request *rq;
+ int err;
+
+ rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_BLOCK_PC;
+ rq->data = NULL;
+ rq->data_len = 0;
+ rq->extra_len = 0;
+ rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+ rq->cmd[0] = cmd;
+ rq->cmd[4] = data;
+ rq->cmd_len = 6;
+ err = blk_execute_rq(q, bd_disk, rq, 0);
+ blk_put_request(rq);
+
+ return err;
+}
+
+static inline int blk_send_start_stop(struct request_queue *q,
+ struct gendisk *bd_disk, int data)
+{
+ return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data);
+}
+
+int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode,
+ unsigned int cmd, void __user *arg)
+{
+ int err;
+
+ if (!q || blk_get_queue(q))
+ return -ENXIO;
+
+ switch (cmd) {
+ /*
+ * new sgv3 interface
+ */
+ case SG_GET_VERSION_NUM:
+ err = sg_get_version(arg);
+ break;
+ case SCSI_IOCTL_GET_IDLUN:
+ err = scsi_get_idlun(q, arg);
+ break;
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ err = scsi_get_bus(q, arg);
+ break;
+ case SG_SET_TIMEOUT:
+ err = sg_set_timeout(q, arg);
+ break;
+ case SG_GET_TIMEOUT:
+ err = sg_get_timeout(q);
+ break;
+ case SG_GET_RESERVED_SIZE:
+ err = sg_get_reserved_size(q, arg);
+ break;
+ case SG_SET_RESERVED_SIZE:
+ err = sg_set_reserved_size(q, arg);
+ break;
+ case SG_EMULATED_HOST:
+ err = sg_emulated_host(q, arg);
+ break;
+ case SG_IO: {
+ struct sg_io_hdr hdr;
+
+ err = -EFAULT;
+ if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ break;
+ err = sg_io(q, bd_disk, &hdr, mode);
+ if (err == -EFAULT)
+ break;
+
+ if (copy_to_user(arg, &hdr, sizeof(hdr)))
+ err = -EFAULT;
+ break;
+ }
+ case CDROM_SEND_PACKET: {
+ struct cdrom_generic_command cgc;
+ struct sg_io_hdr hdr;
+
+ err = -EFAULT;
+ if (copy_from_user(&cgc, arg, sizeof(cgc)))
+ break;
+ cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.interface_id = 'S';
+ hdr.cmd_len = sizeof(cgc.cmd);
+ hdr.dxfer_len = cgc.buflen;
+ err = 0;
+ switch (cgc.data_direction) {
+ case CGC_DATA_UNKNOWN:
+ hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+ break;
+ case CGC_DATA_WRITE:
+ hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ break;
+ case CGC_DATA_READ:
+ hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ break;
+ case CGC_DATA_NONE:
+ hdr.dxfer_direction = SG_DXFER_NONE;
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ break;
+
+ hdr.dxferp = cgc.buffer;
+ hdr.sbp = cgc.sense;
+ if (hdr.sbp)
+ hdr.mx_sb_len = sizeof(struct request_sense);
+ hdr.timeout = jiffies_to_msecs(cgc.timeout);
+ hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
+ hdr.cmd_len = sizeof(cgc.cmd);
+
+ err = sg_io(q, bd_disk, &hdr, mode);
+ if (err == -EFAULT)
+ break;
+
+ if (hdr.status)
+ err = -EIO;
+
+ cgc.stat = err;
+ cgc.buflen = hdr.resid;
+ if (copy_to_user(arg, &cgc, sizeof(cgc)))
+ err = -EFAULT;
+
+ break;
+ }
+
+ /*
+ * old junk scsi send command ioctl
+ */
+ case SCSI_IOCTL_SEND_COMMAND:
+ printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
+ err = -EINVAL;
+ if (!arg)
+ break;
+
+ err = sg_scsi_ioctl(q, bd_disk, mode, arg);
+ break;
+ case CDROMCLOSETRAY:
+ err = blk_send_start_stop(q, bd_disk, 0x03);
+ break;
+ case CDROMEJECT:
+ err = blk_send_start_stop(q, bd_disk, 0x02);
+ break;
+ default:
+ err = -ENOTTY;
+ }
+
+ blk_put_queue(q);
+ return err;
+}
+
+EXPORT_SYMBOL(scsi_cmd_ioctl);