[PATCH 1/4] bidi support: request dma_data_direction

- Introduce a new enum dma_data_direction data_dir member in struct request
  and remove the RW bit from request->cmd_flags.
- Add a new API to query the request direction.
- Adjust the existing API and implementation.
- Clean up incorrect uses of DMA_BIDIRECTIONAL.
- Introduce a new blk_rq_init_unqueued_req() and use it in the places where
  ad-hoc requests were declared and bzero'ed.
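
For illustration only (not part of the patch), a driver using the reworked
API would look roughly like the sketch below, assuming a valid
request_queue_t *q and the helper semantics as used elsewhere in this
series:

    struct request *rq;
    struct request sreq;

    /* direction is now passed explicitly instead of the REQ_RW bit */
    rq = blk_get_request(q, DMA_TO_DEVICE, GFP_KERNEL);
    if (!rq)
        return -ENOMEM;

    /* new query helpers */
    BUG_ON(rq_dma_dir(rq) != DMA_TO_DEVICE);  /* enum dma_data_direction */
    BUG_ON(rq_uni_rw_dir(rq) != WRITE);       /* READ/WRITE for uni-directional */

    /* ad-hoc (unqueued) requests, e.g. on the stack, are initialized with
     * the new helper instead of memset()/bzero() */
    blk_rq_init_unqueued_req(&sreq);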

Signed-off-by: Benny Halevy <bhalevy@xxxxxxxxxxx>
Signed-off-by: Boaz Harrosh <bharrosh@xxxxxxxxxxx>
---
 arch/um/drivers/ubd_kern.c      |    4 +-
 block/as-iosched.c              |    2 +-
 block/cfq-iosched.c             |   10 ++--
 block/deadline-iosched.c        |    8 ++--
 block/elevator.c                |   11 ++--
 block/ll_rw_blk.c               |  113 ++++++++++++++++++++++++---------------
 block/scsi_ioctl.c              |   10 ++--
 drivers/block/DAC960.c          |    2 +-
 drivers/block/amiflop.c         |    4 +-
 drivers/block/ataflop.c         |    2 +-
 drivers/block/cciss.c           |    6 +-
 drivers/block/cpqarray.c        |    6 +-
 drivers/block/floppy.c          |    9 +--
 drivers/block/nbd.c             |    3 +-
 drivers/block/paride/pcd.c      |    2 +-
 drivers/block/paride/pd.c       |   29 +++++------
 drivers/block/paride/pf.c       |    9 +---
 drivers/block/pktcdvd.c         |    2 +-
 drivers/block/ps2esdi.c         |    9 ++--
 drivers/block/swim3.c           |   26 +++++-----
 drivers/block/sx8.c             |    6 +-
 drivers/block/ub.c              |    8 +--
 drivers/block/viodasd.c         |    2 +-
 drivers/block/xd.c              |    4 +-
 drivers/block/z2ram.c           |    2 +-
 drivers/cdrom/cdrom.c           |    2 +-
 drivers/cdrom/cdu31a.c          |    2 +-
 drivers/cdrom/gscd.c            |    2 +-
 drivers/cdrom/sbpcd.c           |    2 +-
 drivers/cdrom/sonycd535.c       |    2 +-
 drivers/cdrom/viocd.c           |    2 +-
 drivers/ide/arm/icside.c        |    7 +--
 drivers/ide/cris/ide-cris.c     |    4 +-
 drivers/ide/ide-cd.c            |   13 +++--
 drivers/ide/ide-disk.c          |   10 ++--
 drivers/ide/ide-dma.c           |    4 +-
 drivers/ide/ide-floppy.c        |    4 +-
 drivers/ide/ide-io.c            |    5 +-
 drivers/ide/ide-tape.c          |    2 +-
 drivers/ide/ide-taskfile.c      |    6 +-
 drivers/ide/ide.c               |    4 +-
 drivers/ide/legacy/hd.c         |    6 +-
 drivers/ide/mips/au1xxx-ide.c   |    7 +--
 drivers/ide/pci/alim15x3.c      |    2 +-
 drivers/ide/pci/hpt366.c        |    2 +-
 drivers/ide/pci/pdc202xx_old.c  |    2 +-
 drivers/ide/pci/scc_pata.c      |    2 +-
 drivers/ide/pci/sgiioc4.c       |    4 +-
 drivers/ide/pci/trm290.c        |    2 +-
 drivers/ide/ppc/pmac.c          |    5 +-
 drivers/md/dm-emc.c             |    2 +-
 drivers/message/i2o/i2o_block.c |    8 ++--
 drivers/mmc/mmc_block.c         |   13 +++--
 drivers/mtd/mtd_blkdevs.c       |    8 ++--
 drivers/s390/block/dasd.c       |    2 +-
 drivers/s390/block/dasd_diag.c  |    4 +-
 drivers/s390/block/dasd_eckd.c  |   10 ++--
 drivers/s390/block/dasd_fba.c   |   14 +++---
 drivers/s390/char/tape_block.c  |    2 +-
 drivers/sbus/char/jsflash.c     |    2 +-
 drivers/scsi/aic7xxx_old.c      |    4 +-
 drivers/scsi/scsi_error.c       |    3 +-
 drivers/scsi/scsi_lib.c         |   21 +++----
 drivers/scsi/scsi_tgt_lib.c     |   29 ++++-------
 drivers/scsi/sd.c               |   20 ++++---
 drivers/scsi/sg.c               |    2 -
 drivers/scsi/sr.c               |   15 +++---
 drivers/scsi/sun3_NCR5380.c     |    6 +-
 include/linux/blkdev.h          |   53 ++++++++++++++++---
 include/linux/blktrace_api.h    |    8 +++-
 include/linux/dma-mapping.h     |   22 ++++++++
 include/linux/elevator.h        |    4 +-
 72 files changed, 359 insertions(+), 285 deletions(-)

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 8bd9204..222ad17 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1030,7 +1030,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
     int len;

     /* This should be impossible now */
-    if((rq_data_dir(req) == WRITE) && !ubd_dev->openflags.w){
+    if((rq_rw_dir(req) == WRITE) && !ubd_dev->openflags.w){
         printk("Write attempted on readonly ubd device %s\n",
                disk->disk_name);
         end_request(req, 0);
@@ -1049,7 +1049,7 @@ static int prepare_request(struct request *req, struct io_thread_req *io_req)
     io_req->error = 0;
     io_req->sector_mask = 0;

-    io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
+    io_req->op = (rq_uni_rw_dir(req) == READ) ? UBD_READ : UBD_WRITE;
     io_req->offsets[0] = 0;
     io_req->offsets[1] = ubd_dev->cow.data_offset;
     io_req->buffer = req->buffer;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index ef12627..824d93e 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1285,7 +1285,7 @@ static void as_work_handler(struct work_struct *work)
     spin_unlock_irqrestore(q->queue_lock, flags);
 }

-static int as_may_queue(request_queue_t *q, int rw)
+static int as_may_queue(request_queue_t *q, int rw, int is_sync)
 {
     int ret = ELV_MQUEUE_MAY;
     struct as_data *ad = q->elevator->elevator_data;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b6491c0..882a15a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -226,7 +226,7 @@ static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
     /*
      * Use the per-process queue, for read requests and syncronous writes
      */
-    if (!(rw & REQ_RW) || is_sync)
+    if (rw != WRITE || is_sync)
         return task->pid;

     return CFQ_KEY_ASYNC;
@@ -1787,14 +1787,14 @@ static inline int __cfq_may_queue(struct cfq_queue *cfqq)
     return ELV_MQUEUE_MAY;
 }

-static int cfq_may_queue(request_queue_t *q, int rw)
+static int cfq_may_queue(request_queue_t *q, int rw, int is_sync)
 {
     struct cfq_data *cfqd = q->elevator->elevator_data;
     struct task_struct *tsk = current;
     struct cfq_queue *cfqq;
     unsigned int key;

-    key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);
+    key = cfq_queue_pid(tsk, rw, is_sync);

     /*
      * don't force setup of a queue from here, as a call to may_queue
@@ -1821,7 +1821,7 @@ static void cfq_put_request(struct request *rq)
     struct cfq_queue *cfqq = RQ_CFQQ(rq);

     if (cfqq) {
-        const int rw = rq_data_dir(rq);
+        const int rw = rq_rw_dir(rq);

         BUG_ON(!cfqq->allocated[rw]);
         cfqq->allocated[rw]--;
@@ -1844,7 +1844,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
     struct cfq_data *cfqd = q->elevator->elevator_data;
     struct task_struct *tsk = current;
     struct cfq_io_context *cic;
-    const int rw = rq_data_dir(rq);
+    const int rw = rq_rw_dir(rq);
     const int is_sync = rq_is_sync(rq);
     pid_t key = cfq_queue_pid(tsk, rw, is_sync);
     struct cfq_queue *cfqq;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 6d673e9..e605c09 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -53,7 +53,7 @@ struct deadline_data {

 static void deadline_move_request(struct deadline_data *, struct request *);

-#define RQ_RB_ROOT(dd, rq)    (&(dd)->sort_list[rq_data_dir((rq))])
+#define RQ_RB_ROOT(dd, rq)    (&(dd)->sort_list[rq_rw_dir((rq))])

 static void
 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
@@ -72,7 +72,7 @@ retry:
 static inline void
 deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
 {
-    const int data_dir = rq_data_dir(rq);
+    const int data_dir = rq_rw_dir(rq);

     if (dd->next_rq[data_dir] == rq) {
         struct rb_node *rbnext = rb_next(&rq->rb_node);
@@ -92,7 +92,7 @@ static void
 deadline_add_request(struct request_queue *q, struct request *rq)
 {
     struct deadline_data *dd = q->elevator->elevator_data;
-    const int data_dir = rq_data_dir(rq);
+    const int data_dir = rq_rw_dir(rq);

     deadline_add_rq_rb(dd, rq);

@@ -197,7 +197,7 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
 static void
 deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
-    const int data_dir = rq_data_dir(rq);
+    const int data_dir = rq_rw_dir(rq);
     struct rb_node *rbnext = rb_next(&rq->rb_node);

     dd->next_rq[READ] = NULL;
diff --git a/block/elevator.c b/block/elevator.c
index 96a00c8..b73feec 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -76,7 +76,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
     /*
      * different data direction or already started, don't merge
      */
-    if (bio_data_dir(bio) != rq_data_dir(rq))
+    if (bio_data_dir(bio) != rq_rw_dir(rq))
         return 0;

     /*
@@ -413,7 +413,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
     list_for_each_prev(entry, &q->queue_head) {
         struct request *pos = list_entry_rq(entry);

-        if (rq_data_dir(rq) != rq_data_dir(pos))
+        if (rq_uni_rw_dir(rq) != rq_uni_rw_dir(pos))
             break;
         if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
             break;
@@ -733,7 +733,8 @@ struct request *elv_next_request(request_queue_t *q)
             blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
         }

-        if (!q->boundary_rq || q->boundary_rq == rq) {
+        if ((!q->boundary_rq || q->boundary_rq == rq) &&
+            !rq_bidi_dir(rq)) {
             q->end_sector = rq_end_sector(rq);
             q->boundary_rq = NULL;
         }
@@ -845,12 +846,12 @@ void elv_put_request(request_queue_t *q, struct request *rq)
         e->ops->elevator_put_req_fn(rq);
 }

-int elv_may_queue(request_queue_t *q, int rw)
+int elv_may_queue(request_queue_t *q, int rw, int is_sync)
 {
     elevator_t *e = q->elevator;

     if (e->ops->elevator_may_queue_fn)
-        return e->ops->elevator_may_queue_fn(q, rw);
+        return e->ops->elevator_may_queue_fn(q, rw, is_sync);

     return ELV_MQUEUE_MAY;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3de0695..e7ae7d4 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -431,8 +431,9 @@ static inline struct request *start_ordered(request_queue_t *q,
     rq = &q->bar_rq;
     rq->cmd_flags = 0;
     rq_init(q, rq);
-    if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-        rq->cmd_flags |= REQ_RW;
+    rq->data_dir = bio_data_dir(q->orig_bar_rq->bio) ?
+        DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
     rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
     rq->elevator_private = NULL;
     rq->elevator_private2 = NULL;
@@ -1958,18 +1959,25 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
 }

 static struct request *
-blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(request_queue_t *q, enum dma_data_direction dir,
+    int priv, gfp_t gfp_mask)
 {
     struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);

     if (!rq)
         return NULL;

+    rq->cmd_flags = REQ_ALLOCED;
+
+    BUG_ON(!(dma_uni_dir(dir) || (dir == DMA_BIDIRECTIONAL)));
+    rq->data_dir = dir;
+
     /*
-     * first three bits are identical in rq->cmd_flags and bio->bi_rw,
-     * see bio.h and blkdev.h
+     * FIXME: Safeguard from unqueued requests
+     * that were not allocated/initted by us
      */
-    rq->cmd_flags = rw | REQ_ALLOCED;
+    if (dir == DMA_BIDIRECTIONAL)
+        rq->cmd_flags |= REQ_BIDI;

     if (priv) {
         if (unlikely(elv_set_request(q, rq, gfp_mask))) {
@@ -2055,16 +2063,17 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw_flags,
-                   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q,
+    enum dma_data_direction dir, struct bio *bio, gfp_t gfp_mask)
 {
     struct request *rq = NULL;
     struct request_list *rl = &q->rq;
     struct io_context *ioc = NULL;
-    const int rw = rw_flags & 0x01;
     int may_queue, priv;
+    int rw = dma_write_dir(dir);
+    int is_sync = (rw == READ) || (bio && bio_sync(bio));

-    may_queue = elv_may_queue(q, rw_flags);
+    may_queue = elv_may_queue(q, rw, is_sync);
     if (may_queue == ELV_MQUEUE_NO)
         goto rq_starved;

@@ -2112,7 +2121,7 @@ static struct request *get_request(request_queue_t *q, int rw_flags,

     spin_unlock_irq(q->queue_lock);

-    rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+    rq = blk_alloc_request(q, dir, priv, gfp_mask);
     if (unlikely(!rq)) {
         /*
          * Allocation failed presumably due to memory. Undo anything
@@ -2160,13 +2169,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw_flags,
-                    struct bio *bio)
+static struct request *get_request_wait(request_queue_t *q,
+    enum dma_data_direction dir, struct bio *bio)
 {
-    const int rw = rw_flags & 0x01;
+    const int rw = dma_write_dir(dir);
     struct request *rq;

-    rq = get_request(q, rw_flags, bio, GFP_NOIO);
+    rq = get_request(q, dir, bio, GFP_NOIO);
     while (!rq) {
         DEFINE_WAIT(wait);
         struct request_list *rl = &q->rq;
@@ -2174,7 +2183,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
         prepare_to_wait_exclusive(&rl->wait[rw], &wait,
                 TASK_UNINTERRUPTIBLE);

-        rq = get_request(q, rw_flags, bio, GFP_NOIO);
+        rq = get_request(q, dir, bio, GFP_NOIO);

         if (!rq) {
             struct io_context *ioc;
@@ -2202,17 +2211,16 @@ static struct request *get_request_wait(request_queue_t *q, int rw_flags,
     return rq;
 }

-struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
+struct request *blk_get_request(request_queue_t *q,
+    enum dma_data_direction dir, gfp_t gfp_mask)
 {
     struct request *rq;

-    BUG_ON(rw != READ && rw != WRITE);
-
     spin_lock_irq(q->queue_lock);
     if (gfp_mask & __GFP_WAIT) {
-        rq = get_request_wait(q, rw, NULL);
+        rq = get_request_wait(q, dir, NULL);
     } else {
-        rq = get_request(q, rw, NULL, gfp_mask);
+        rq = get_request(q, dir, NULL, gfp_mask);
         if (!rq)
             spin_unlock_irq(q->queue_lock);
     }
@@ -2223,6 +2231,22 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask)
 EXPORT_SYMBOL(blk_get_request);

 /**
+ * blk_rq_init_unqueued_req - Initialize a request that does not come from a queue
+ *
+ * Description:
+ * Drivers that need to send a request that does not belong to a queue, e.g.
+ * one declared on the stack, should not bzero the request.
+ * They should call this function instead to initialize the request.
+ * It should never be called with a request that was allocated by a queue.
+ */
+void blk_rq_init_unqueued_req(struct request *rq)
+{
+    memset(rq, 0, sizeof(*rq));
+    rq->data_dir = DMA_FROM_DEVICE;
+}
+EXPORT_SYMBOL(blk_rq_init_unqueued_req);
+
+/**
  * blk_start_queueing - initiate dispatch of requests to device
  * @q:        request queue to kick into gear
  *
@@ -2335,7 +2359,7 @@ static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
     struct bio *bio, *orig_bio;
     int reading, ret;

-    reading = rq_data_dir(rq) == READ;
+    reading = rq_rw_dir(rq) == READ;

     /*
      * if alignment requirement is satisfied, map in user pages for
@@ -2479,7 +2503,7 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
     /* we don't allow misaligned data like bio_map_user() does.  If the
      * user is using sg, they're expected to know the alignment constraints
      * and respect them accordingly */
-    bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+    bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_rw_dir(rq)== READ);
     if (IS_ERR(bio))
         return PTR_ERR(bio);

@@ -2552,7 +2576,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
     if (IS_ERR(bio))
         return PTR_ERR(bio);

-    if (rq_data_dir(rq) == WRITE)
+    if (dma_write_dir(rq->data_dir))
         bio->bi_rw |= (1 << BIO_RW);

     blk_rq_bio_prep(q, rq, bio);
@@ -2663,7 +2687,7 @@ EXPORT_SYMBOL(blkdev_issue_flush);

 static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
 {
-    int rw = rq_data_dir(rq);
+    int rw = rq_rw_dir(rq);

     if (!blk_fs_request(rq) || !rq->rq_disk)
         return;
@@ -2741,7 +2765,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
      * it didn't come out of our reserved rq pools
      */
     if (req->cmd_flags & REQ_ALLOCED) {
-        int rw = rq_data_dir(req);
+        int rw = rq_rw_dir(req);
         int priv = req->cmd_flags & REQ_ELVPRIV;

         BUG_ON(!list_empty(&req->queuelist));
@@ -2807,7 +2831,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
     if (req->sector + req->nr_sectors != next->sector)
         return 0;

-    if (rq_data_dir(req) != rq_data_dir(next)
+    if (rq_dma_dir(req) != rq_dma_dir(next)
         || req->rq_disk != next->rq_disk
         || next->special)
         return 0;
@@ -2908,7 +2932,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
     int el_ret, nr_sectors, barrier, err;
     const unsigned short prio = bio_prio(bio);
     const int sync = bio_sync(bio);
-    int rw_flags;

     nr_sectors = bio_sectors(bio);

@@ -2983,19 +3006,11 @@ static int __make_request(request_queue_t *q, struct bio *bio)

 get_rq:
     /*
-     * This sync check and mask will be re-done in init_request_from_bio(),
-     * but we need to set it earlier to expose the sync flag to the
-     * rq allocator and io schedulers.
-     */
-    rw_flags = bio_data_dir(bio);
-    if (sync)
-        rw_flags |= REQ_RW_SYNC;
-
-    /*
      * Grab a free request. This is might sleep but can not fail.
      * Returns with the queue unlocked.
      */
-    req = get_request_wait(q, rw_flags, bio);
+    req = get_request_wait(q,
+        bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, bio);

     /*
      * After dropping the lock and possibly sleeping here, our request
@@ -3346,7 +3361,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
     if (!blk_pc_request(req))
         req->errors = 0;

-    if (!uptodate) {
+    if (error) {
         if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
             printk("end_request: I/O error, dev %s, sector %llu\n",
                 req->rq_disk ? req->rq_disk->disk_name : "?",
@@ -3354,7 +3369,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
     }

     if (blk_fs_request(req) && req->rq_disk) {
-        const int rw = rq_data_dir(req);
+        const int rw = rq_rw_dir(req);

         disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
     }
@@ -3578,7 +3593,7 @@ void end_that_request_last(struct request *req, int uptodate)
      */
     if (disk && blk_fs_request(req) && req != &req->q->bar_rq) {
         unsigned long duration = jiffies - req->start_time;
-        const int rw = rq_data_dir(req);
+        const int rw = rq_rw_dir(req);

         __disk_stat_inc(disk, ios[rw]);
         __disk_stat_add(disk, ticks[rw], duration);
@@ -3606,8 +3621,20 @@ EXPORT_SYMBOL(end_request);

 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-    /* first two bits are identical in rq->cmd_flags and bio->bi_rw */
-    rq->cmd_flags |= (bio->bi_rw & 3);
+    rq->data_dir = bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+    if (bio->bi_rw & (1<<BIO_RW_SYNC))
+        rq->cmd_flags |= REQ_RW_SYNC;
+    else
+        rq->cmd_flags &= ~REQ_RW_SYNC;
+    /* FIXME: what about other flags, should we sync these too? */
+    /*
+    BIO_RW_AHEAD    ==> ??
+    BIO_RW_BARRIER    ==> REQ_SOFTBARRIER/REQ_HARDBARRIER
+    BIO_RW_FAILFAST    ==> REQ_FAILFAST
+    BIO_RW_SYNC    ==> REQ_RW_SYNC
+    BIO_RW_META    ==> REQ_RW_META
+    */

     rq->nr_phys_segments = bio_phys_segments(q, bio);
     rq->nr_hw_segments = bio_hw_segments(q, bio);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 65c6a3c..8fc6b65 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -254,7 +254,8 @@ static int sg_io(struct file *file, request_queue_t *q,
             break;
         }

-    rq = blk_get_request(q, writing ? WRITE : READ, GFP_KERNEL);
+    rq = blk_get_request(q,
+        writing ? DMA_TO_DEVICE : DMA_FROM_DEVICE, GFP_KERNEL);
     if (!rq)
         return -ENOMEM;

@@ -410,7 +411,8 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
         memset(buffer, 0, bytes);
     }

-    rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+    rq = blk_get_request(q,
+        in_len ? DMA_TO_DEVICE : DMA_FROM_DEVICE, __GFP_WAIT);

     cmdlen = COMMAND_SIZE(opcode);

@@ -495,10 +497,8 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
     struct request *rq;
     int err;

-    rq = blk_get_request(q, WRITE, __GFP_WAIT);
+    rq = blk_get_request(q, DMA_NONE, __GFP_WAIT);
     rq->cmd_type = REQ_TYPE_BLOCK_PC;
-    rq->data = NULL;
-    rq->data_len = 0;
     rq->timeout = BLK_DEFAULT_TIMEOUT;
     memset(rq->cmd, 0, sizeof(rq->cmd));
     rq->cmd[0] = cmd;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 92bf868..8ba5142 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -3322,7 +3322,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
     if (Command == NULL)
         return 0;

-    if (rq_data_dir(Request) == READ) {
+    if (rq_uni_rw_dir(Request) == READ) {
         Command->DmaDirection = PCI_DMA_FROMDEVICE;
         Command->CommandType = DAC960_ReadCommand;
     } else {
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 5d65621..54f2fb3 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1379,7 +1379,7 @@ static void redo_fd_request(void)
                "0x%08lx\n", track, sector, data);
 #endif

-        if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
+        if (!dma_uni_dir(CURRENT->data_dir)) {
             printk(KERN_WARNING "do_fd_request: unknown command\n");
             end_request(CURRENT, 0);
             goto repeat;
@@ -1389,7 +1389,7 @@ static void redo_fd_request(void)
             goto repeat;
         }

-        switch (rq_data_dir(CURRENT)) {
+        switch (rq_rw_dir(CURRENT)) {
         case READ:
             memcpy(data, floppy->trackbuf + sector * 512, 512);
             break;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 14d6b94..b940802 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1453,7 +1453,7 @@ repeat:
     del_timer( &motor_off_timer );

     ReqCnt = 0;
-    ReqCmd = rq_data_dir(CURRENT);
+    ReqCmd = rq_uni_rw_dir(CURRENT);
     ReqBlock = CURRENT->sector;
     ReqBuffer = CURRENT->buffer;
     setup_req_params( drive );
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 072e18e..d87eecc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1237,7 +1237,7 @@ static void cciss_softirq_done(struct request *rq)
     complete_buffers(rq->bio, rq->errors);

     if (blk_fs_request(rq)) {
-        const int rw = rq_data_dir(rq);
+        const int rw = rq_rw_dir(rq);

         disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
     }
@@ -2501,10 +2501,10 @@ static void do_cciss_request(request_queue_t *q)
     c->Request.Type.Type = TYPE_CMD;    // It is a command.
     c->Request.Type.Attribute = ATTR_SIMPLE;
     c->Request.Type.Direction =
-        (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
+        (rq_uni_rw_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
     c->Request.Timeout = 0;    // Don't time out
     c->Request.CDB[0] =
-        (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
+        (rq_uni_rw_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
     start_blk = creq->sector;
 #ifdef CCISS_DEBUG
     printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b94cd1c..59b62cf 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -922,7 +922,7 @@ DBGPX(
     seg = blk_rq_map_sg(q, creq, tmp_sg);

     /* Now do all the DMA Mappings */
-    if (rq_data_dir(creq) == READ)
+    if (rq_uni_rw_dir(creq) == READ)
         dir = PCI_DMA_FROMDEVICE;
     else
         dir = PCI_DMA_TODEVICE;
@@ -937,7 +937,7 @@ DBGPX(
 DBGPX(    printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
     c->req.hdr.sg_cnt = seg;
     c->req.hdr.blk_cnt = creq->nr_sectors;
-    c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
+    c->req.hdr.cmd = (rq_uni_rw_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
     c->type = CMD_RWREQ;

     /* Put the request on the tail of the request queue */
@@ -1033,7 +1033,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
     complete_buffers(rq->bio, ok);

     if (blk_fs_request(rq)) {
-        const int rw = rq_data_dir(rq);
+        const int rw = rq_rw_dir(rq);

         disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
     }
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 5231ed7..d27b8aa 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2325,7 +2325,7 @@ static void request_done(int uptodate)
         floppy_end_request(req, 1);
         spin_unlock_irqrestore(q->queue_lock, flags);
     } else {
-        if (rq_data_dir(req) == WRITE) {
+        if (rq_uni_rw_dir(req) == WRITE) {
             /* record write error information */
             DRWE->write_errors++;
             if (DRWE->write_errors == 1) {
@@ -2628,15 +2628,12 @@ static int make_raw_rw_request(void)
     raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK |
         FD_RAW_NEED_SEEK;
     raw_cmd->cmd_count = NR_RW;
-    if (rq_data_dir(current_req) == READ) {
+    if (rq_uni_rw_dir(current_req) == READ) {
         raw_cmd->flags |= FD_RAW_READ;
         COMMAND = FM_MODE(_floppy, FD_READ);
-    } else if (rq_data_dir(current_req) == WRITE) {
+    } else {
         raw_cmd->flags |= FD_RAW_WRITE;
         COMMAND = FM_MODE(_floppy, FD_WRITE);
-    } else {
-        DPRINT("make_raw_rw_request: unknown command\n");
-        return 0;
     }

     max_sector = _floppy->sect * _floppy->head;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 090796b..411e138 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -434,7 +434,7 @@ static void do_nbd_request(request_queue_t * q)
         BUG_ON(lo->magic != LO_MAGIC);

         nbd_cmd(req) = NBD_CMD_READ;
-        if (rq_data_dir(req) == WRITE) {
+        if (rq_uni_rw_dir(req) == WRITE) {
             nbd_cmd(req) = NBD_CMD_WRITE;
             if (lo->flags & NBD_READ_ONLY) {
                 printk(KERN_ERR "%s: Write on read-only\n",
@@ -502,6 +502,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
     dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
             lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

+    blk_rq_init_unqueued_req(&sreq);
     switch (cmd) {
     case NBD_DISCONNECT:
             printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index c852eed..375499e 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -722,7 +722,7 @@ static void do_pcd_request(request_queue_t * q)
         if (!pcd_req)
             return;

-        if (rq_data_dir(pcd_req) == READ) {
+        if (rq_dma_dir(pcd_req) == DMA_FROM_DEVICE) {
             struct pcd_unit *cd = pcd_req->rq_disk->private_data;
             if (cd != pcd_current)
                 pcd_bufblk = -1;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 31e0148..0db4e8a 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -441,21 +441,18 @@ static enum action do_pd_io_start(void)
         return pd_special();
     }

-    pd_cmd = rq_data_dir(pd_req);
-    if (pd_cmd == READ || pd_cmd == WRITE) {
-        pd_block = pd_req->sector;
-        pd_count = pd_req->current_nr_sectors;
-        if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
-            return Fail;
-        pd_run = pd_req->nr_sectors;
-        pd_buf = pd_req->buffer;
-        pd_retries = 0;
-        if (pd_cmd == READ)
-            return do_pd_read_start();
-        else
-            return do_pd_write_start();
-    }
-    return Fail;
+    pd_cmd = rq_uni_rw_dir(pd_req);
+    pd_block = pd_req->sector;
+    pd_count = pd_req->current_nr_sectors;
+    if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
+        return Fail;
+    pd_run = pd_req->nr_sectors;
+    pd_buf = pd_req->buffer;
+    pd_retries = 0;
+    if (pd_cmd == READ)
+        return do_pd_read_start();
+    else
+        return do_pd_write_start();
 }

 static enum action pd_special(void)
@@ -716,7 +713,7 @@ static int pd_special_command(struct pd_unit *disk,
     struct request rq;
     int err = 0;

-    memset(&rq, 0, sizeof(rq));
+    blk_rq_init_unqueued_req(&rq);
     rq.errors = 0;
     rq.rq_disk = disk->gd;
     rq.ref_count = 1;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 7cdaa19..47e9bcb 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -779,20 +779,15 @@ repeat:
         goto repeat;
     }

-    pf_cmd = rq_data_dir(pf_req);
+    pf_cmd = rq_uni_rw_dir(pf_req);
     pf_buf = pf_req->buffer;
     pf_retries = 0;

     pf_busy = 1;
     if (pf_cmd == READ)
         pi_do_claimed(pf_current->pi, do_pf_read);
-    else if (pf_cmd == WRITE)
+    else
         pi_do_claimed(pf_current->pi, do_pf_write);
-    else {
-        pf_busy = 0;
-        pf_end_request(0);
-        goto repeat;
-    }
 }

 static int pf_next_buf(void)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a4fb703..4c2413b 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -758,7 +758,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
     int ret = 0;

     rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-                 WRITE : READ, __GFP_WAIT);
+                 DMA_TO_DEVICE : DMA_FROM_DEVICE, __GFP_WAIT);

     if (cgc->buflen) {
         if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index 688a4fb..cf06abd 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -506,11 +506,12 @@ static void do_ps2esdi_request(request_queue_t * q)
         return;
     }

-    switch (rq_data_dir(req)) {
-    case READ:
+    switch (rq_dma_dir(req)) {
+    case DMA_FROM_DEVICE:
+    case DMA_NONE:
         ps2esdi_readwrite(READ, req);
         break;
-    case WRITE:
+    case DMA_TO_DEVICE:
         ps2esdi_readwrite(WRITE, req);
         break;
     default:
@@ -859,7 +860,7 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
     case INT_TRANSFER_REQ:
         ps2esdi_prep_dma(current_req->buffer,
                  current_req->current_nr_sectors,
-            (rq_data_dir(current_req) == READ)
+            (rq_uni_rw_dir(current_req) == READ)
             ? MCA_DMA_MODE_16 | MCA_DMA_MODE_WRITE | MCA_DMA_MODE_XFER
             : MCA_DMA_MODE_16 | MCA_DMA_MODE_READ);
         outb(CTRL_ENABLE_DMA | CTRL_ENABLE_INTR, ESDI_CONTROL);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 1a65979..91eb6f9 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -336,7 +336,7 @@ static void start_request(struct floppy_state *fs)
             continue;
         }

-        if (rq_data_dir(req) == WRITE) {
+        if (rq_uni_rw_dir(req) == WRITE) {
             if (fs->write_prot < 0)
                 fs->write_prot = swim3_readbit(fs, WRITE_PROT);
             if (fs->write_prot) {
@@ -432,7 +432,7 @@ static inline void setup_transfer(struct floppy_state *fs)
         printk(KERN_ERR "swim3: transfer 0 sectors?\n");
         return;
     }
-    if (rq_data_dir(fd_req) == WRITE)
+    if (rq_uni_rw_dir(fd_req) == WRITE)
         n = 1;
     else {
         n = fs->secpertrack - fs->req_sector + 1;
@@ -445,7 +445,7 @@ static inline void setup_transfer(struct floppy_state *fs)
     out_8(&sw->nsect, n);
     out_8(&sw->gap3, 0);
     out_le32(&dr->cmdptr, virt_to_bus(cp));
-    if (rq_data_dir(fd_req) == WRITE) {
+    if (rq_uni_rw_dir(fd_req) == WRITE) {
         /* Set up 3 dma commands: write preamble, data, postamble */
         init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
         ++cp;
@@ -460,7 +460,7 @@ static inline void setup_transfer(struct floppy_state *fs)
     out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
     in_8(&sw->error);
     out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-    if (rq_data_dir(fd_req) == WRITE)
+    if (rq_uni_rw_dir(fd_req) == WRITE)
         out_8(&sw->control_bis, WRITE_SECTORS);
     in_8(&sw->intr);
     out_le32(&dr->control, (RUN << 16) | RUN);
@@ -609,7 +609,7 @@ static void xfer_timeout(unsigned long data)
     out_8(&sw->intr_enable, 0);
     out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
     out_8(&sw->select, RELAX);
-    if (rq_data_dir(fd_req) == WRITE)
+    if (rq_uni_rw_dir(fd_req) == WRITE)
         ++cp;
     if (ld_le16(&cp->xfer_status) != 0)
         s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
@@ -617,8 +617,8 @@ static void xfer_timeout(unsigned long data)
         s = 0;
     fd_req->sector += s;
     fd_req->current_nr_sectors -= s;
-    printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-           (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
+    printk(KERN_ERR "swim3: timeout %s sector %ld\n",
+           dma_dir_to_string(fd_req->data_dir), (long)fd_req->sector);
     end_request(fd_req, 0);
     fs->state = idle;
     start_request(fs);
@@ -636,8 +636,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
     intr = in_8(&sw->intr);
     err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
     if ((intr & ERROR_INTR) && fs->state != do_transfer)
-        printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-               fs->state, rq_data_dir(fd_req), intr, err);
+        printk(KERN_ERR "swim3_interrupt, state=%d, dir=%d, intr=%x, err=%x\n",
+               fs->state, rq_dma_dir(fd_req), intr, err);
     switch (fs->state) {
     case locating:
         if (intr & SEEN_SECTOR) {
@@ -698,7 +698,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
         fs->timeout_pending = 0;
         dr = fs->dma;
         cp = fs->dma_cmd;
-        if (rq_data_dir(fd_req) == WRITE)
+        if (rq_uni_rw_dir(fd_req) == WRITE)
             ++cp;
         /*
          * Check that the main data transfer has finished.
@@ -733,7 +733,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                 act(fs);
             } else {
                 printk("swim3: error %sing block %ld (err=%x)\n",
-                       rq_data_dir(fd_req) == WRITE? "writ": "read",
+                       rq_rw_dir(fd_req) == WRITE? "writ": "read",
                        (long)fd_req->sector, err);
                 end_request(fd_req, 0);
                 fs->state = idle;
@@ -742,8 +742,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
             if ((stat & ACTIVE) == 0 || resid != 0) {
                 /* musta been an error */
                 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-                printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-                       fs->state, rq_data_dir(fd_req), intr, err);
+                printk(KERN_ERR "  state=%d, dir=%d, intr=%x, err=%x\n",
+                       fs->state, rq_dma_dir(fd_req), intr, err);
                 end_request(fd_req, 0);
                 fs->state = idle;
                 start_request(fs);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 54509eb..61df2a0 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -565,7 +565,7 @@ static struct carm_request *carm_get_special(struct carm_host *host)
     if (!crq)
         return NULL;

-    rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+    rq = blk_get_request(host->oob_q, DMA_TO_DEVICE /* bogus */, GFP_KERNEL);
     if (!rq) {
         spin_lock_irqsave(&host->lock, flags);
         carm_put_request(host, crq);
@@ -860,7 +860,7 @@ queue_one_request:

     blkdev_dequeue_request(rq);

-    if (rq_data_dir(rq) == WRITE) {
+    if (rq_uni_rw_dir(rq) == WRITE) {
         writing = 1;
         pci_dir = PCI_DMA_TODEVICE;
     } else {
@@ -1053,7 +1053,7 @@ static inline void carm_handle_rw(struct carm_host *host,

     VPRINTK("ENTER\n");

-    if (rq_data_dir(crq->rq) == WRITE)
+    if (rq_uni_rw_dir(crq->rq) == WRITE)
         pci_dir = PCI_DMA_TODEVICE;
     else
         pci_dir = PCI_DMA_FROMDEVICE;
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 2098eff..c789a37 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -709,11 +709,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
     struct request *rq = urq->rq;
     unsigned int block, nblks;

-    if (rq_data_dir(rq) == WRITE)
-        cmd->dir = UB_DIR_WRITE;
-    else
-        cmd->dir = UB_DIR_READ;
-
+    cmd->dir = rq_uni_rw_dir(rq) ? UB_DIR_WRITE : UB_DIR_READ;
     cmd->nsg = urq->nsg;
     memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

@@ -747,7 +743,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
     if (rq->data_len == 0) {
         cmd->dir = UB_DIR_NONE;
     } else {
-        if (rq_data_dir(rq) == WRITE)
+        if (rq_uni_rw_dir(rq) == WRITE)
             cmd->dir = UB_DIR_WRITE;
         else
             cmd->dir = UB_DIR_READ;
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
index 68592c3..ff960c6 100644
--- a/drivers/block/viodasd.c
+++ b/drivers/block/viodasd.c
@@ -302,7 +302,7 @@ static int send_request(struct request *req)

     start = (u64)req->sector << 9;

-    if (rq_data_dir(req) == READ) {
+    if (rq_uni_rw_dir(req) == READ) {
         direction = DMA_FROM_DEVICE;
         viocmd = viomajorsubtype_blockio | vioblockread;
         statindex = 0;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0d97b7e..3a4a377 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -308,7 +308,7 @@ static void do_xd_request (request_queue_t * q)
     while ((req = elv_next_request(q)) != NULL) {
         unsigned block = req->sector;
         unsigned count = req->nr_sectors;
-        int rw = rq_data_dir(req);
+        int rw = rq_rw_dir(req);
         XD_INFO *disk = req->rq_disk->private_data;
         int res = 0;
         int retry;
@@ -321,7 +321,7 @@ static void do_xd_request (request_queue_t * q)
             end_request(req, 0);
             continue;
         }
-        if (rw != READ && rw != WRITE) {
+        if (!dma_uni_dir(req->data_dir)) {
             printk("do_xd_request: unknown request\n");
             end_request(req, 0);
             continue;
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 7cc2685..6cc92b3 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -89,7 +89,7 @@ static void do_z2_request(request_queue_t *q)
             if (len < size)
                 size = len;
             addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
-            if (rq_data_dir(req) == READ)
+            if (rq_uni_rw_dir(req) == READ)
                 memcpy(req->buffer, (char *)addr, size);
             else
                 memcpy((char *)addr, req->buffer, size);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index b36f44d..53f383d 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2103,7 +2103,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
     if (!q)
         return -ENXIO;

-    rq = blk_get_request(q, READ, GFP_KERNEL);
+    rq = blk_get_request(q, DMA_FROM_DEVICE, GFP_KERNEL);
     if (!rq)
         return -ENOMEM;

diff --git a/drivers/cdrom/cdu31a.c b/drivers/cdrom/cdu31a.c
index 2157c58..2eb37bf 100644
--- a/drivers/cdrom/cdu31a.c
+++ b/drivers/cdrom/cdu31a.c
@@ -1342,7 +1342,7 @@ static void do_cdu31a_request(request_queue_t * q)
             end_request(req, 0);
             continue;
         }
-        if (rq_data_dir(req) == WRITE) {
+        if (rq_rw_dir(req) == WRITE) {
             end_request(req, 0);
             continue;
         }
diff --git a/drivers/cdrom/gscd.c b/drivers/cdrom/gscd.c
index b3ab6e9..8411f8c 100644
--- a/drivers/cdrom/gscd.c
+++ b/drivers/cdrom/gscd.c
@@ -265,7 +265,7 @@ repeat:
         goto out;

     if (req->cmd != READ) {
-        printk("GSCD: bad cmd %u\n", rq_data_dir(req));
+        printk("GSCD: bad cmd %u\n", rq_dma_dir(req));
         end_request(req, 0);
         goto repeat;
     }
diff --git a/drivers/cdrom/sbpcd.c b/drivers/cdrom/sbpcd.c
index a1283b1..f345876 100644
--- a/drivers/cdrom/sbpcd.c
+++ b/drivers/cdrom/sbpcd.c
@@ -4550,7 +4550,7 @@ static void do_sbpcd_request(request_queue_t * q)
     spin_unlock_irq(q->queue_lock);

     down(&ioctl_read_sem);
-    if (rq_data_dir(elv_next_request(q)) != READ)
+    if (rq_rw_dir(elv_next_request(q)) != READ)
     {
         msg(DBG_INF, "bad cmd %d\n", req->cmd[0]);
         goto err_done;
diff --git a/drivers/cdrom/sonycd535.c b/drivers/cdrom/sonycd535.c
index f77ada9..3516cc7 100644
--- a/drivers/cdrom/sonycd535.c
+++ b/drivers/cdrom/sonycd535.c
@@ -816,7 +816,7 @@ do_cdu535_request(request_queue_t * q)
             end_request(req, 0);
             continue;
         }
-        if (rq_data_dir(req) == WRITE) {
+        if (rq_rw_dir(req) == WRITE) {
             end_request(req, 0);
             continue;
         }
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 44cd7b2..9afb9e9 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -338,7 +338,7 @@ static int send_request(struct request *req)

     BUG_ON(req->nr_phys_segments > 1);

-    if (rq_data_dir(req) == READ) {
+    if (rq_uni_rw_dir(req) == READ) {
         direction = DMA_FROM_DEVICE;
         cmd = viomajorsubtype_cdio | viocdread;
     } else {
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index e2953fc..ff14c3c 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -214,10 +214,7 @@ static void icside_build_sglist(ide_drive_t *drive, struct request *rq)

     ide_map_sg(drive, rq);

-    if (rq_data_dir(rq) == READ)
-        hwif->sg_dma_direction = DMA_FROM_DEVICE;
-    else
-        hwif->sg_dma_direction = DMA_TO_DEVICE;
+    hwif->sg_dma_direction = rq_dma_dir(rq);

     hwif->sg_nents = dma_map_sg(state->dev, sg, hwif->sg_nents,
                     hwif->sg_dma_direction);
@@ -392,7 +389,7 @@ static int icside_dma_setup(ide_drive_t *drive)
     struct request *rq = hwif->hwgroup->rq;
     unsigned int dma_mode;

-    if (rq_data_dir(rq))
+    if (rq_uni_rw_dir(rq))
         dma_mode = DMA_MODE_WRITE;
     else
         dma_mode = DMA_MODE_READ;
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index 556455f..090269b 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -1060,7 +1060,7 @@ static int cris_dma_setup(ide_drive_t *drive)
 {
     struct request *rq = drive->hwif->hwgroup->rq;

-    cris_ide_initialize_dma(!rq_data_dir(rq));
+    cris_ide_initialize_dma(!rq_uni_rw_dir(rq));
     if (cris_ide_build_dmatable (drive)) {
         ide_map_sg(drive, rq);
         return 1;
@@ -1082,7 +1082,7 @@ static void cris_dma_exec_cmd(ide_drive_t *drive, u8 command)
 static void cris_dma_start(ide_drive_t *drive)
 {
     struct request *rq = drive->hwif->hwgroup->rq;
-    int writing = rq_data_dir(rq);
+    int writing = rq_uni_rw_dir(rq);
     int type = TYPE_DMA;

     if (drive->current_speed >= XFER_UDMA_0)
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 45a928c..0387997 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -767,7 +767,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)

         if (sense_key == NOT_READY) {
             /* Tray open. */
-            if (rq_data_dir(rq) == READ) {
+            if (rq_uni_rw_dir(rq) == READ) {
                 cdrom_saw_media_change (drive);

                 /* Fail the request. */
@@ -1730,7 +1730,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
     /*
      * check which way to transfer data
      */
-    if (rq_data_dir(rq) == WRITE) {
+    if (rq_uni_rw_dir(rq) == WRITE) {
         /*
          * write to drive
          */
@@ -2021,10 +2021,13 @@ ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
             }
             CDROM_CONFIG_FLAGS(drive)->seeking = 0;
         }
-        if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) {
+        if ((rq_uni_rw_dir(rq) == READ) &&
+            IDE_LARGE_SEEK(info->last_block, block,
+                IDECD_SEEK_THRESHOLD) &&
+            drive->dsc_overlap) {
             action = cdrom_start_seek(drive, block);
         } else {
-            if (rq_data_dir(rq) == READ)
+            if (rq_uni_rw_dir(rq) == READ)
                 action = cdrom_start_read(drive, block);
             else
                 action = cdrom_start_write(drive, rq);
@@ -3066,7 +3069,7 @@ static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)

     memset(rq->cmd, 0, sizeof(rq->cmd));

-    if (rq_data_dir(rq) == READ)
+    if (rq_uni_rw_dir(rq) == READ)
         rq->cmd[0] = GPCMD_READ_10;
     else
         rq->cmd[0] = GPCMD_WRITE_10;
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index 37aa6dd..5d66966 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -253,7 +253,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,

     if (dma) {
         if (!hwif->dma_setup(drive)) {
-            if (rq_data_dir(rq)) {
+            if (rq_uni_rw_dir(rq)) {
                 command = lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA;
                 if (drive->vdma)
                     command = lba48 ? WIN_WRITE_EXT: WIN_WRITE;
@@ -270,7 +270,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
         ide_init_sg_cmd(drive, rq);
     }

-    if (rq_data_dir(rq) == READ) {
+    if (rq_uni_rw_dir(rq) == READ) {

         if (drive->mult_count) {
             hwif->data_phase = TASKFILE_MULTI_IN;
@@ -318,8 +318,8 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s

     ledtrig_ide_activity();

-    pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
-         drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
+    pr_debug("%s: %s: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+         drive->name, dma_dir_to_string(rq->data_dir),
          (unsigned long long)block, rq->nr_sectors,
          (unsigned long)rq->buffer);

@@ -713,7 +713,7 @@ static int idedisk_issue_flush(request_queue_t *q, struct gendisk *disk,
     if (!drive->wcache)
         return 0;

-    rq = blk_get_request(q, WRITE, __GFP_WAIT);
+    rq = blk_get_request(q, DMA_TO_DEVICE, __GFP_WAIT);

     idedisk_prepare_flush(q, rq);

diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index fd21308..abedd21 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -207,7 +207,7 @@ int ide_build_sglist(ide_drive_t *drive, struct request *rq)

     ide_map_sg(drive, rq);

-    if (rq_data_dir(rq) == READ)
+    if (rq_uni_rw_dir(rq) == READ)
         hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
     else
         hwif->sg_dma_direction = PCI_DMA_TODEVICE;
@@ -545,7 +545,7 @@ int ide_dma_setup(ide_drive_t *drive)
     unsigned int reading;
     u8 dma_stat;

-    if (rq_data_dir(rq))
+    if (rq_uni_rw_dir(rq))
         reading = 0;
     else
         reading = 1 << 3;
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index 57cd21c..dfd1562 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -1233,7 +1233,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 {
     int block = sector / floppy->bs_factor;
     int blocks = rq->nr_sectors / floppy->bs_factor;
-    int cmd = rq_data_dir(rq);
+    int cmd = rq_uni_rw_dir(rq);

     debug_log("create_rw1%d_cmd: block == %d, blocks == %d\n",
         2 * test_bit (IDEFLOPPY_USE_READ12, &floppy->flags),
@@ -1251,7 +1251,7 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
     pc->callback = &idefloppy_rw_callback;
     pc->rq = rq;
     pc->b_count = cmd == READ ? 0 : rq->bio->bi_size;
-    if (rq->cmd_flags & REQ_RW)
+    if (cmd == WRITE)
         set_bit(PC_WRITING, &pc->flags);
     pc->buffer = NULL;
     pc->request_transfer = pc->buffer_size = blocks * floppy->block_size;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0e02800..0097e51 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -516,7 +516,8 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
         }
     }

-    if ((stat & DRQ_STAT) && rq_data_dir(rq) == READ && hwif->err_stops_fifo == 0)
+    if ((stat & DRQ_STAT) && rq_uni_rw_dir(rq) == READ &&
+        hwif->err_stops_fifo == 0)
         try_to_flush_leftover_data(drive);

     if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
@@ -1706,7 +1707,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)

 void ide_init_drive_cmd (struct request *rq)
 {
-    memset(rq, 0, sizeof(*rq));
+    blk_rq_init_unqueued_req(rq);
     rq->cmd_type = REQ_TYPE_ATA_CMD;
     rq->ref_count = 1;
 }
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 4e59239..3c33d57 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1775,7 +1775,7 @@ static void idetape_create_request_sense_cmd (idetape_pc_t *pc)

 static void idetape_init_rq(struct request *rq, u8 cmd)
 {
-    memset(rq, 0, sizeof(*rq));
+    blk_rq_init_unqueued_req(rq);
     rq->cmd_type = REQ_TYPE_SPECIAL;
     rq->cmd[0] = cmd;
 }
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 30175c7..158338f 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -473,7 +473,7 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
 {
     struct request rq;

-    memset(&rq, 0, sizeof(rq));
+    ide_init_drive_cmd(&rq);
     rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
     rq.buffer = buf;

@@ -498,8 +498,8 @@ static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long
         rq.hard_nr_sectors = rq.nr_sectors;
         rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;

-        if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
-            rq.cmd_flags |= REQ_RW;
+        rq.data_dir = (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) ?
+            DMA_TO_DEVICE : DMA_FROM_DEVICE;
     }

     rq.special = args;
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index a6f098f..67422df 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -1242,7 +1242,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
     if (!(drive->dn % 2))
         ide_acpi_get_timing(hwif);

-    memset(&rq, 0, sizeof(rq));
+    ide_init_drive_cmd(&rq);
     memset(&rqpm, 0, sizeof(rqpm));
     memset(&args, 0, sizeof(args));
     rq.cmd_type = REQ_TYPE_PM_SUSPEND;
@@ -1270,7 +1270,7 @@ static int generic_ide_resume(struct device *dev)

     ide_acpi_exec_tfs(drive);

-    memset(&rq, 0, sizeof(rq));
+    ide_init_drive_cmd(&rq);
     memset(&rqpm, 0, sizeof(rqpm));
     memset(&args, 0, sizeof(args));
     rq.cmd_type = REQ_TYPE_PM_RESUME;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 45ed035..ca8dd47 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -627,13 +627,13 @@ repeat:
         cyl, head, sec, nsect, req->buffer);
 #endif
     if (blk_fs_request(req)) {
-        switch (rq_data_dir(req)) {
-        case READ:
+        switch (rq_dma_dir(req)) {
+        case DMA_FROM_DEVICE:
             hd_out(disk,nsect,sec,head,cyl,WIN_READ,&read_intr);
             if (reset)
                 goto repeat;
             break;
-        case WRITE:
+        case DMA_TO_DEVICE:
             hd_out(disk,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
             if (reset)
                 goto repeat;
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index d54d9fe..ebf2d9b 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -248,10 +248,7 @@ static int auide_build_sglist(ide_drive_t *drive,  struct request *rq)

     ide_map_sg(drive, rq);

-    if (rq_data_dir(rq) == READ)
-        hwif->sg_dma_direction = DMA_FROM_DEVICE;
-    else
-        hwif->sg_dma_direction = DMA_TO_DEVICE;
+    hwif->sg_dma_direction = rq_dma_dir(rq);

     return dma_map_sg(ahwif->dev, sg, hwif->sg_nents,
               hwif->sg_dma_direction);
@@ -267,7 +264,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
     _auide_hwif *ahwif = (_auide_hwif*)hwif->hwif_data;
     struct scatterlist *sg;

-    iswrite = (rq_data_dir(rq) == WRITE);
+    iswrite = (rq_uni_rw_dir(rq) == WRITE);
     /* Save for interrupt context */
     ahwif->drive = drive;

diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 83e0aa6..72b3576 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -585,7 +585,7 @@ no_dma_set:
 static int ali15x3_dma_setup(ide_drive_t *drive)
 {
     if (m5229_revision < 0xC2 && drive->media != ide_disk) {
-        if (rq_data_dir(drive->hwif->hwgroup->rq))
+        if (rq_uni_rw_dir(drive->hwif->hwgroup->rq))
             return 1;    /* try PIO instead of DMA */
     }
     return ide_dma_setup(drive);
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index 60ecdc2..dd83c41 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -908,7 +908,7 @@ static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode)

 static void hpt3xxn_rw_disk(ide_drive_t *drive, struct request *rq)
 {
-    hpt3xxn_set_clock(HWIF(drive), rq_data_dir(rq) ? 0x23 : 0x21);
+    hpt3xxn_set_clock(HWIF(drive), rq_uni_rw_dir(rq) ? 0x23 : 0x21);
 }

 /*
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index a7a639f..ddd301a 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -357,7 +357,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)

         outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
         word_count = (rq->nr_sectors << 8);
-        word_count = (rq_data_dir(rq) == READ) ?
+        word_count = (rq_uni_rw_dir(rq) == READ) ?
                     word_count | 0x05000000 :
                     word_count | 0x06000000;
         outl(word_count, atapi_reg);
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index f84bf79..1edfbf6 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -398,7 +398,7 @@ static int scc_dma_setup(ide_drive_t *drive)
     unsigned int reading;
     u8 dma_stat;

-    if (rq_data_dir(rq))
+    if (rq_uni_rw_dir(rq))
         reading = 0;
     else
         reading = 1 << 3;
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index fd09b29..7533d79 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -554,7 +554,7 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
     unsigned int count = 0;
     int ddir;

-    if (rq_data_dir(rq))
+    if (rq_uni_rw_dir(rq))
         ddir = PCI_DMA_TODEVICE;
     else
         ddir = PCI_DMA_FROMDEVICE;
@@ -565,7 +565,7 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
         return 1;
     }

-    if (rq_data_dir(rq))
+    if (rq_uni_rw_dir(rq))
         /* Writes TO the IOC4 FROM Main Memory */
         ddir = IOC4_DMA_READ;
     else
diff --git a/drivers/ide/pci/trm290.c b/drivers/ide/pci/trm290.c
index cbb1b11..3c39547 100644
--- a/drivers/ide/pci/trm290.c
+++ b/drivers/ide/pci/trm290.c
@@ -191,7 +191,7 @@ static int trm290_ide_dma_setup(ide_drive_t *drive)
     struct request *rq = hwif->hwgroup->rq;
     unsigned int count, rw;

-    if (rq_data_dir(rq)) {
+    if (rq_uni_rw_dir(rq)) {
 #ifdef TRM290_NO_DMA_WRITES
         /* always use PIO for writes */
         trm290_prepare_drive(drive, 0);    /* select PIO xfer */
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 071a030..909d43d 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -1596,7 +1596,7 @@ pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq)
     pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
     volatile struct dbdma_regs __iomem *dma = pmif->dma_regs;
     struct scatterlist *sg;
-    int wr = (rq_data_dir(rq) == WRITE);
+    int wr = (rq_uni_rw_dir(rq) == WRITE);

     /* DMA table is already aligned */
     table = (struct dbdma_cmd *) pmif->dma_table_cpu;
@@ -1873,7 +1873,8 @@ pmac_ide_dma_setup(ide_drive_t *drive)

     /* Apple adds 60ns to wrDataSetup on reads */
     if (ata4 && (pmif->timings[unit] & TR_66_UDMA_EN)) {
-        writel(pmif->timings[unit] + (!rq_data_dir(rq) ? 0x00800000UL : 0),
+        writel(pmif->timings[unit] +
+            (!rq_uni_rw_dir(rq) ? 0x00800000UL : 0),
             PMAC_IDE_REG(IDE_TIMING_CONFIG));
         (void)readl(PMAC_IDE_REG(IDE_TIMING_CONFIG));
     }
diff --git a/drivers/md/dm-emc.c b/drivers/md/dm-emc.c
index 265c467..347c2b6 100644
--- a/drivers/md/dm-emc.c
+++ b/drivers/md/dm-emc.c
@@ -103,7 +103,7 @@ static struct request *get_failover_req(struct emc_handler *h,
     struct request_queue *q = bdev_get_queue(bdev);

     /* FIXME: Figure out why it fails with GFP_ATOMIC. */
-    rq = blk_get_request(q, WRITE, __GFP_WAIT);
+    rq = blk_get_request(q, DMA_TO_DEVICE, __GFP_WAIT);
     if (!rq) {
         DMERR("get_failover_req: blk_get_request failed");
         return NULL;
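
blk_get_request() now takes the data direction as an enum dma_data_direction rather than a READ/WRITE int, so callers such as the dm-emc failover path above simply say DMA_TO_DEVICE. A minimal sketch of a converted caller (example_get_write_rq is a made-up name):

static struct request *example_get_write_rq(struct request_queue *q)
{
    struct request *rq;

    /* direction is stated explicitly instead of being encoded in WRITE */
    rq = blk_get_request(q, DMA_TO_DEVICE, GFP_KERNEL);
    if (!rq)
        return NULL;

    rq->cmd_type = REQ_TYPE_BLOCK_PC;
    return rq;
}
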
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index b17c4b2..1772620 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -342,7 +342,7 @@ static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
     ireq->dev = &c->pdev->dev;
     nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

-    if (rq_data_dir(ireq->req) == READ)
+    if (rq_uni_rw_dir(ireq->req) == READ)
         direction = PCI_DMA_FROMDEVICE;
     else
         direction = PCI_DMA_TODEVICE;
@@ -362,7 +362,7 @@ static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
 {
     enum dma_data_direction direction;

-    if (rq_data_dir(ireq->req) == READ)
+    if (rq_uni_rw_dir(ireq->req) == READ)
         direction = PCI_DMA_FROMDEVICE;
     else
         direction = PCI_DMA_TODEVICE;
@@ -779,7 +779,7 @@ static int i2o_block_transfer(struct request *req)

     mptr = &msg->body[0];

-    if (rq_data_dir(req) == READ) {
+    if (rq_uni_rw_dir(req) == READ) {
         cmd = I2O_CMD_BLOCK_READ << 24;

         switch (dev->rcache) {
@@ -844,7 +844,7 @@ static int i2o_block_transfer(struct request *req)
          * SIMPLE_TAG
          * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
          */
-        if (rq_data_dir(req) == READ) {
+        if (rq_uni_rw_dir(req) == READ) {
             cmd[0] = READ_10;
             scsi_flags = 0x60a0000a;
         } else {
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index 86439a0..566d7e5 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -248,7 +248,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
         if (brq.data.blocks > card->host->max_blk_count)
             brq.data.blocks = card->host->max_blk_count;

-        mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
+        mmc_set_data_timeout(&brq.data, card,
+                             rq_uni_rw_dir(req) != READ);

         /*
          * If the host doesn't support multiple block writes, force
@@ -256,7 +257,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
          * this rule as they support querying the number of
          * successfully written sectors.
          */
-        if (rq_data_dir(req) != READ &&
+        if (rq_uni_rw_dir(req) != READ &&
             !(card->host->caps & MMC_CAP_MULTIWRITE) &&
             !mmc_card_sd(card))
             brq.data.blocks = 1;
@@ -272,7 +273,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
             writecmd = MMC_WRITE_BLOCK;
         }

-        if (rq_data_dir(req) == READ) {
+        if (rq_uni_rw_dir(req) == READ) {
             brq.cmd.opcode = readcmd;
             brq.data.flags |= MMC_DATA_READ;
         } else {
@@ -302,7 +303,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
             goto cmd_err;
         }

-        if (rq_data_dir(req) != READ) {
+        if (rq_uni_rw_dir(req) != READ) {
             do {
                 int err;

@@ -357,7 +358,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
      * For reads we just fail the entire chunk as that should
      * be safe in all cases.
      */
-     if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+     if (rq_uni_rw_dir(req) != READ && mmc_card_sd(card)) {
         u32 blocks;
         unsigned int bytes;

@@ -371,7 +372,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
             ret = end_that_request_chunk(req, 1, bytes);
             spin_unlock_irq(&md->lock);
         }
-    } else if (rq_data_dir(req) != READ &&
+    } else if (rq_uni_rw_dir(req) != READ &&
            (card->host->caps & MMC_CAP_MULTIWRITE)) {
         spin_lock_irq(&md->lock);
         ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index b879a66..5df310e 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -53,14 +53,14 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
     if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
         return 0;

-    switch(rq_data_dir(req)) {
-    case READ:
+    switch(rq_dma_dir(req)) {
+    case DMA_FROM_DEVICE:
         for (; nsect > 0; nsect--, block++, buf += tr->blksize)
             if (tr->readsect(dev, block, buf))
                 return 0;
         return 1;

-    case WRITE:
+    case DMA_TO_DEVICE:
         if (!tr->writesect)
             return 0;

@@ -70,7 +70,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
         return 1;

     default:
-        printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+        printk(KERN_NOTICE "Unknown request %u\n", rq_dma_dir(req));
         return 0;
     }
 }
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index eb5dc62..65ab1b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1215,7 +1215,7 @@ __dasd_process_blk_queue(struct dasd_device * device)
         req = elv_next_request(queue);

         if (device->features & DASD_FEATURE_READONLY &&
-            rq_data_dir(req) == WRITE) {
+            rq_rw_dir(req) == WRITE) {
             DBF_DEV_EVENT(DBF_ERR, device,
                       "Rejecting write request %p",
                       req);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index e810e4a..423debf 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -478,9 +478,9 @@ dasd_diag_build_cp(struct dasd_device * device, struct request *req)
     unsigned char rw_cmd;
     int i;

-    if (rq_data_dir(req) == READ)
+    if (rq_dma_dir(req) == DMA_FROM_DEVICE)
         rw_cmd = MDSK_READ_REQ;
-    else if (rq_data_dir(req) == WRITE)
+    else if (rq_dma_dir(req) == DMA_TO_DEVICE)
         rw_cmd = MDSK_WRITE_REQ;
     else
         return ERR_PTR(-EINVAL);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cecab22..e73a161 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1113,9 +1113,9 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
     int i;

     private = (struct dasd_eckd_private *) device->private;
-    if (rq_data_dir(req) == READ)
+    if (rq_dma_dir(req) == DMA_FROM_DEVICE)
         cmd = DASD_ECKD_CCW_READ_MT;
-    else if (rq_data_dir(req) == WRITE)
+    else if (rq_dma_dir(req) == DMA_TO_DEVICE)
         cmd = DASD_ECKD_CCW_WRITE_MT;
     else
         return ERR_PTR(-EINVAL);
@@ -1187,7 +1187,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
         if (dasd_page_cache) {
             char *copy = kmem_cache_alloc(dasd_page_cache,
                               GFP_DMA | __GFP_NOWARN);
-            if (copy && rq_data_dir(req) == WRITE)
+            if (copy && rq_uni_rw_dir(req) == WRITE)
                 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
             if (copy)
                 dst = copy + bv->bv_offset;
@@ -1203,7 +1203,7 @@ dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
                     rcmd |= 0x8;
                     count = dasd_eckd_cdl_reclen(recid);
                     if (count < blksize &&
-                        rq_data_dir(req) == READ)
+                        rq_uni_rw_dir(req) == READ)
                         memset(dst + count, 0xe5,
                                blksize - count);
                 }
@@ -1283,7 +1283,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                 else
                     cda = (char *)((addr_t) ccw->cda);
                 if (dst != cda) {
-                    if (rq_data_dir(req) == READ)
+                    if (rq_uni_rw_dir(req) == READ)
                         memcpy(dst, cda, bv->bv_len);
                     kmem_cache_free(dasd_page_cache,
                         (void *)((addr_t)cda & PAGE_MASK));
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index be0909e..35a7bfe 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -244,9 +244,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
     int i;

     private = (struct dasd_fba_private *) device->private;
-    if (rq_data_dir(req) == READ) {
+    if (rq_dma_dir(req) == DMA_FROM_DEVICE) {
         cmd = DASD_FBA_CCW_READ;
-    } else if (rq_data_dir(req) == WRITE) {
+    } else if (rq_dma_dir(req) == DMA_TO_DEVICE) {
         cmd = DASD_FBA_CCW_WRITE;
     } else
         return ERR_PTR(-EINVAL);
@@ -293,7 +293,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
         return cqr;
     ccw = cqr->cpaddr;
     /* First ccw is define extent. */
-    define_extent(ccw++, cqr->data, rq_data_dir(req),
+    define_extent(ccw++, cqr->data, rq_uni_rw_dir(req),
               device->bp_block, req->sector, req->nr_sectors);
     /* Build locate_record + read/write ccws. */
     idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
@@ -301,7 +301,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
     /* Locate record for all blocks for smart devices. */
     if (private->rdc_data.mode.bits.data_chain != 0) {
         ccw[-1].flags |= CCW_FLAG_CC;
-        locate_record(ccw++, LO_data++, rq_data_dir(req), 0, count);
+        locate_record(ccw++, LO_data++, rq_uni_rw_dir(req), 0, count);
     }
     recid = first_rec;
     rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
@@ -309,7 +309,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
         if (dasd_page_cache) {
             char *copy = kmem_cache_alloc(dasd_page_cache,
                               GFP_DMA | __GFP_NOWARN);
-            if (copy && rq_data_dir(req) == WRITE)
+            if (copy && rq_uni_rw_dir(req) == WRITE)
                 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
             if (copy)
                 dst = copy + bv->bv_offset;
@@ -319,7 +319,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
             if (private->rdc_data.mode.bits.data_chain == 0) {
                 ccw[-1].flags |= CCW_FLAG_CC;
                 locate_record(ccw, LO_data++,
-                          rq_data_dir(req),
+                          rq_uni_rw_dir(req),
                           recid - first_rec, 1);
                 ccw->flags = CCW_FLAG_CC;
                 ccw++;
@@ -386,7 +386,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
                 else
                     cda = (char *)((addr_t) ccw->cda);
                 if (dst != cda) {
-                    if (rq_data_dir(req) == READ)
+                    if (rq_uni_rw_dir(req) == READ)
                         memcpy(dst, cda, bv->bv_len);
                     kmem_cache_free(dasd_page_cache,
                         (void *)((addr_t)cda & PAGE_MASK));
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index dd0ecae..e9a67d0 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -174,7 +174,7 @@ tapeblock_requeue(struct work_struct *work) {
         nr_queued < TAPEBLOCK_MIN_REQUEUE
     ) {
         req = elv_next_request(queue);
-        if (rq_data_dir(req) == WRITE) {
+        if (rq_rw_dir(req) == WRITE) {
             DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
             blkdev_dequeue_request(req);
             tapeblock_end_request(req, 0);
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 512857a..36bd378 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -199,7 +199,7 @@ static void jsfd_do_request(request_queue_t *q)
             continue;
         }

-        if (rq_data_dir(req) != READ) {
+        if (rq_rw_dir(req) != READ) {
             printk(KERN_ERR "jsfd: write\n");
             end_request(req, 0);
             continue;
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a988d5a..2e77803 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2849,7 +2849,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
     int x, i;


-    if (rq_data_dir(cmd->request) == WRITE)
+    if (rq_uni_rw_dir(cmd->request) == WRITE)
     {
       aic_dev->w_total++;
       ptr = aic_dev->w_bins;
@@ -3858,7 +3858,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
       {
         printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
           "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
-          (rq_data_dir(cmd->request) == WRITE) ? "wrote" : "read", actual,
+          (rq_rw_dir(cmd->request) == WRITE) ? "wrote" : "read", actual,
           hscb->residual_SG_segment_count);
         printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb),
           hscb->target_status);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 918bb60..e4baab6 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1676,6 +1676,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
     unsigned long flags;
     int rtn;

+    blk_rq_init_unqueued_req(&req);
     scmd->request = &req;
     memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));

@@ -1688,7 +1689,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)

     scmd->cmd_len            = 0;

-    scmd->sc_data_direction        = DMA_BIDIRECTIONAL;
+    scmd->sc_data_direction        = DMA_NONE;

     init_timer(&scmd->eh_timeout);

diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9f7482d..2c5bc49 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -181,10 +181,9 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
          unsigned char *sense, int timeout, int retries, int flags)
 {
     struct request *req;
-    int write = (data_direction == DMA_TO_DEVICE);
     int ret = DRIVER_ERROR << 24;

-    req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+    req = blk_get_request(sdev->request_queue, data_direction, __GFP_WAIT);

     if (bufflen &&    blk_rq_map_kern(sdev->request_queue, req,
                     buffer, bufflen, __GFP_WAIT))
@@ -259,8 +258,10 @@ static int scsi_merge_bio(struct request *rq, struct bio *bio)
     struct request_queue *q = rq->q;

     bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-    if (rq_data_dir(rq) == WRITE)
+    if (rq_dma_dir(rq) == DMA_TO_DEVICE)
         bio->bi_rw |= (1 << BIO_RW);
+    else
+        bio->bi_rw &= ~(1 << BIO_RW);
     blk_queue_bounce(q, &bio);

     if (!rq->bio)
@@ -386,13 +387,14 @@ int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
     struct request *req;
     struct scsi_io_context *sioc;
     int err = 0;
-    int write = (data_direction == DMA_TO_DEVICE);

     sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
     if (!sioc)
         return DRIVER_ERROR << 24;

-    req = blk_get_request(sdev->request_queue, write, gfp);
+    WARN_ON((data_direction == DMA_NONE) && bufflen);
+    WARN_ON((data_direction != DMA_NONE) && !bufflen);
+    req = blk_get_request(sdev->request_queue, data_direction, gfp);
     if (!req)
         goto free_sense;
     req->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -1118,18 +1120,13 @@ static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
         cmd->request_bufflen = 0;
         cmd->request_buffer = NULL;
         cmd->use_sg = 0;
-        req->buffer = NULL;
+        req->data_dir = DMA_NONE;
     }

     BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
     memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
     cmd->cmd_len = req->cmd_len;
-    if (!req->data_len)
-        cmd->sc_data_direction = DMA_NONE;
-    else if (rq_data_dir(req) == WRITE)
-        cmd->sc_data_direction = DMA_TO_DEVICE;
-    else
-        cmd->sc_data_direction = DMA_FROM_DEVICE;
+    cmd->sc_data_direction = rq_dma_dir(req);

     cmd->transfersize = req->data_len;
     cmd->allowed = req->retries;
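
Since scsi_execute() and scsi_execute_async() now thread the dma_data_direction straight through to the request, callers no longer collapse it into a write flag. For illustration only, a no-data command would pass DMA_NONE (example_test_unit_ready is a made-up name):

static int example_test_unit_ready(struct scsi_device *sdev)
{
    unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };

    /* no data transfer: DMA_NONE with a NULL buffer and zero length */
    return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
                        30 * HZ, 3, 0);
}
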
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
index d402aff..72cdde1 100644
--- a/drivers/scsi/scsi_tgt_lib.c
+++ b/drivers/scsi/scsi_tgt_lib.c
@@ -80,7 +80,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
                     enum dma_data_direction data_dir,
                     gfp_t gfp_mask)
 {
-    int write = (data_dir == DMA_TO_DEVICE);
     struct request *rq;
     struct scsi_cmnd *cmd;
     struct scsi_tgt_cmd *tcmd;
@@ -93,7 +92,7 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost,
     if (!tcmd)
         goto put_dev;

-    rq = blk_get_request(shost->uspace_req_q, write, gfp_mask);
+    rq = blk_get_request(shost->uspace_req_q, data_dir, gfp_mask);
     if (!rq)
         goto free_tcmd;

@@ -191,17 +190,8 @@ static void scsi_tgt_cmd_destroy(struct work_struct *work)
         container_of(work, struct scsi_tgt_cmd, work);
     struct scsi_cmnd *cmd = tcmd->rq->special;

-    dprintk("cmd %p %d %lu\n", cmd, cmd->sc_data_direction,
-        rq_data_dir(cmd->request));
-    /*
-     * We fix rq->cmd_flags here since when we told bio_map_user
-     * to write vm for WRITE commands, blk_rq_bio_prep set
-     * rq_data_dir the flags to READ.
-     */
-    if (cmd->sc_data_direction == DMA_TO_DEVICE)
-        cmd->request->cmd_flags |= REQ_RW;
-    else
-        cmd->request->cmd_flags &= ~REQ_RW;
+    dprintk("cmd %p %d %d\n", cmd, cmd->sc_data_direction,
+        rq_dma_dir(cmd->request));

     scsi_unmap_user_pages(tcmd);
     scsi_host_put_command(scsi_tgt_cmd_to_host(cmd), cmd);
@@ -346,7 +336,7 @@ static void scsi_tgt_cmd_done(struct scsi_cmnd *cmd)
 {
     struct scsi_tgt_cmd *tcmd = cmd->request->end_io_data;

-    dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+    dprintk("cmd %p %d\n", cmd, rq_dma_dir(cmd->request));

     scsi_tgt_uspace_send_status(cmd, tcmd->tag);
     queue_work(scsi_tgtd, &tcmd->work);
@@ -357,7 +347,7 @@ static int __scsi_tgt_transfer_response(struct scsi_cmnd *cmd)
     struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
     int err;

-    dprintk("cmd %p %lu\n", cmd, rq_data_dir(cmd->request));
+    dprintk("cmd %p %d\n", cmd, rq_dma_dir(cmd->request));

     err = shost->hostt->transfer_response(cmd, scsi_tgt_cmd_done);
     switch (err) {
@@ -398,8 +388,8 @@ static int scsi_tgt_init_cmd(struct scsi_cmnd *cmd, gfp_t gfp_mask)

     cmd->request_bufflen = rq->data_len;

-    dprintk("cmd %p addr %p cnt %d %lu\n", cmd, tcmd->buffer, cmd->use_sg,
-        rq_data_dir(rq));
+    dprintk("cmd %p addr %p cnt %d %d\n", cmd, tcmd->buffer, cmd->use_sg,
+        rq_dma_dir(rq));
     count = blk_rq_map_sg(rq->q, rq, cmd->request_buffer);
     if (likely(count <= cmd->use_sg)) {
         cmd->use_sg = count;
@@ -617,8 +607,9 @@ int scsi_tgt_kspace_exec(int host_no, u64 tag, int result, u32 len,
     }
     cmd = rq->special;

-    dprintk("cmd %p result %d len %d bufflen %u %lu %x\n", cmd,
-        result, len, cmd->request_bufflen, rq_data_dir(rq), cmd->cmnd[0]);
+    dprintk("cmd %p result %d len %d bufflen %u %d %x\n", cmd,
+        result, len, cmd->request_bufflen, rq_dma_dir(rq),
+        cmd->cmnd[0]);

     if (result == TASK_ABORTED) {
         scsi_tgt_abort_cmd(shost, cmd);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5a8f55f..e9e60d7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -432,23 +432,27 @@ static int sd_init_command(struct scsi_cmnd * SCpnt)
             this_count = this_count >> 3;
         }
     }
-    if (rq_data_dir(rq) == WRITE) {
+
+    SCpnt->sc_data_direction = rq_dma_dir(rq);
+    switch (SCpnt->sc_data_direction) {
+    case DMA_TO_DEVICE:
         if (!sdp->writeable) {
             return 0;
         }
         SCpnt->cmnd[0] = WRITE_6;
-        SCpnt->sc_data_direction = DMA_TO_DEVICE;
-    } else if (rq_data_dir(rq) == READ) {
+        break;
+    case DMA_FROM_DEVICE:
         SCpnt->cmnd[0] = READ_6;
-        SCpnt->sc_data_direction = DMA_FROM_DEVICE;
-    } else {
-        printk(KERN_ERR "sd: Unknown command %x\n", rq->cmd_flags);
+        break;
+    default:
+        printk(KERN_ERR "sd: Unknown command %x data_dir %d\n",
+            rq->cmd_flags, rq_dma_dir(rq));
         return 0;
     }

     SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
-        disk->disk_name, (rq_data_dir(rq) == WRITE) ?
-        "writing" : "reading", this_count, rq->nr_sectors));
+        disk->disk_name, dma_dir_to_string(rq->data_dir),
+        this_count, rq->nr_sectors));

     SCpnt->cmnd[1] = 0;

diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 81e3bc7..46a1f7e 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -733,8 +733,6 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
         data_dir = DMA_TO_DEVICE;
         break;
     case SG_DXFER_UNKNOWN:
-        data_dir = DMA_BIDIRECTIONAL;
-        break;
     default:
         data_dir = DMA_NONE;
         break;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1857d68..40ade44 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -334,16 +334,18 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)
         return 0;
     }

-    if (rq_data_dir(SCpnt->request) == WRITE) {
+    SCpnt->sc_data_direction = rq_dma_dir(SCpnt->request);
+    switch (SCpnt->sc_data_direction) {
+    case DMA_TO_DEVICE:
         if (!cd->device->writeable)
             return 0;
         SCpnt->cmnd[0] = WRITE_10;
-        SCpnt->sc_data_direction = DMA_TO_DEVICE;
           cd->cdi.media_written = 1;
-    } else if (rq_data_dir(SCpnt->request) == READ) {
+        break;
+    case DMA_FROM_DEVICE:
         SCpnt->cmnd[0] = READ_10;
-        SCpnt->sc_data_direction = DMA_FROM_DEVICE;
-    } else {
+        break;
+    default:
         blk_dump_rq_flags(SCpnt->request, "Unknown sr command");
         return 0;
     }
@@ -377,8 +379,7 @@ static int sr_init_command(struct scsi_cmnd * SCpnt)

     SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
                 cd->cdi.name,
-                (rq_data_dir(SCpnt->request) == WRITE) ?
-                    "writing" : "reading",
+                dma_dir_to_string(SCpnt->request->data_dir),
                 this_count, SCpnt->request->nr_sectors));

     SCpnt->cmnd[1] = 0;
diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c
index 98e3fe1..6d18b33 100644
--- a/drivers/scsi/sun3_NCR5380.c
+++ b/drivers/scsi/sun3_NCR5380.c
@@ -1206,7 +1206,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )
            HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
            NCR5380_read(STATUS_REG));

-    if((sun3scsi_dma_finish(rq_data_dir(hostdata->connected->request)))) {
+    if((sun3scsi_dma_finish(rq_uni_rw_dir(hostdata->connected->request)))) {
         printk("scsi%d: overrun in UDC counter -- not prepared to deal with this!\n", HOSTNO);
         printk("please e-mail sammy@xxxxxxxxx with a description of how this\n");
         printk("error was produced.\n");
@@ -2024,7 +2024,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
         {
             if(blk_fs_request(cmd->request)) {
                 sun3scsi_dma_setup(d, count,
-                           rq_data_dir(cmd->request));
+                           rq_uni_rw_dir(cmd->request));
                 sun3_dma_setup_done = cmd;
             }
         }
@@ -2636,7 +2636,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
         /* setup this command for dma if not already */
         if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done != tmp))
         {
-            sun3scsi_dma_setup(d, count, rq_data_dir(tmp->request));
+            sun3scsi_dma_setup(d, count, rq_uni_rw_dir(tmp->request));
             sun3_dma_setup_done = tmp;
         }
 #endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83dcd8c..0969008 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -14,6 +14,7 @@
 #include <linux/bio.h>
 #include <linux/module.h>
 #include <linux/stringify.h>
+#include <linux/dma-mapping.h>

 #include <asm/scatterlist.h>

@@ -177,10 +178,9 @@ enum {
 };

 /*
- * request type modified bits. first three bits match BIO_RW* bits, important
+ * request type modified bits.
  */
 enum rq_flag_bits {
-    __REQ_RW,        /* not set, read. set, write */
     __REQ_FAILFAST,        /* no low level driver retries */
     __REQ_SORTED,        /* elevator knows about this request */
     __REQ_SOFTBARRIER,    /* may not be passed by ioscheduler */
@@ -199,9 +199,10 @@ enum rq_flag_bits {
     __REQ_ALLOCED,        /* request came from our alloc pool */
     __REQ_RW_META,        /* metadata io request */
+    __REQ_BIDI,        /* FIXME: will be removed; only used for quirk checks */
     __REQ_NR_BITS,        /* stops here */
 };

-#define REQ_RW        (1 << __REQ_RW)
+#define REQ_BIDI    (1 << __REQ_BIDI)
 #define REQ_FAILFAST    (1 << __REQ_FAILFAST)
 #define REQ_SORTED    (1 << __REQ_SORTED)
 #define REQ_SOFTBARRIER    (1 << __REQ_SOFTBARRIER)
@@ -232,6 +233,7 @@ struct request {
     request_queue_t *q;

     unsigned int cmd_flags;
+    enum dma_data_direction data_dir;
     enum rq_cmd_type_bits cmd_type;

     /* Maintain bio traversal state for part by part I/O submission.
@@ -545,12 +547,46 @@ enum {

 #define list_entry_rq(ptr)    list_entry((ptr), struct request, queuelist)

-#define rq_data_dir(rq)        ((rq)->cmd_flags & 1)
+#define rq_dma_dir(rq)        ((rq)->data_dir)
+#define rq_uni_dir(rq)        dma_uni_dir((rq)->data_dir)
+
+static inline int rq_bidi_dir(struct request *rq)
+{
+    /*
+     * FIXME: the (rq->cmd_flags & REQ_BIDI) check will be removed once
+     * all the warnings go away.
+     */
+    return (rq_dma_dir(rq) == DMA_BIDIRECTIONAL) &&
+        (rq->cmd_flags & REQ_BIDI);
+}
+
+static inline int rq_rw_dir(struct request *rq)
+{
+    return dma_write_dir(rq->data_dir) ? WRITE : READ;
+}
+
+static inline int rq_uni_rw_dir(struct request *rq)
+{
+    WARN_ON(!dma_uni_dir(rq->data_dir));
+    return (rq->data_dir == DMA_TO_DEVICE) ? WRITE : READ;
+}
+
+/*
+ * Since DMA_BIDIRECTIONAL == 0, drivers that simply bzero a request make it
+ * look bidirectional; this warning catches such cases. Use
+ * blk_rq_init_unqueued_req() for requests that did not come from a request_queue.
+ */
+#define WARN_ON_BIDI_FLAG(req) \
+    WARN_ON( \
+        (rq_dma_dir(req) == DMA_BIDIRECTIONAL) != \
+        (((req)->cmd_flags & REQ_BIDI) != 0) \
+    )

 /*
  * We regard a request as sync, if it's a READ or a SYNC write.
  */
-#define rq_is_sync(rq)        (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+#define rq_is_sync(rq)        (rq_dma_dir(rq) == DMA_FROM_DEVICE || \
+                                 (rq)->cmd_flags & REQ_RW_SYNC)
 #define rq_is_meta(rq)        ((rq)->cmd_flags & REQ_RW_META)

 static inline int blk_queue_full(struct request_queue *q, int rw)
@@ -584,7 +620,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS    \
     (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)    \
-    (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+    (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)) && \
+    ((rq)->data_dir != DMA_BIDIRECTIONAL))

 /*
  * q->prep_rq_fn return values
@@ -630,7 +667,9 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
+extern struct request *blk_get_request(request_queue_t *,
+                                       enum dma_data_direction, gfp_t);
+extern void blk_rq_init_unqueued_req(struct request *);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
 extern void blk_plug_device(request_queue_t *);
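
To recap the helpers introduced above: rq_dma_dir() returns the raw enum, rq_rw_dir() maps it to READ/WRITE with bidirectional counted as a write, and rq_uni_rw_dir() does the same but warns if the request is in fact bidirectional. A rough conversion guide for a driver's dispatch path (the do_* helpers are made-up names):

static void example_dispatch(struct request *rq)
{
    switch (rq_dma_dir(rq)) {
    case DMA_TO_DEVICE:
    case DMA_FROM_DEVICE:
        /* unidirectional hardware path; safe to fall back to READ/WRITE */
        do_uni_io(rq, rq_uni_rw_dir(rq));    /* made-up helper */
        break;
    case DMA_BIDIRECTIONAL:
        do_bidi_io(rq);                      /* made-up helper */
        break;
    case DMA_NONE:
    default:
        do_no_data(rq);                      /* made-up helper */
        break;
    }
}
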
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3680ff9..d9665b1 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -161,7 +161,13 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                     u32 what)
 {
     struct blk_trace *bt = q->blk_trace;
-    int rw = rq->cmd_flags & 0x03;
+    /* blktrace.c prints them according to bio flags */
+    int rw = (((rq_rw_dir(rq) == WRITE) << BIO_RW) |
+              (((rq->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER)) != 0) <<
+               BIO_RW_BARRIER) |
+              (((rq->cmd_flags & REQ_FAILFAST) != 0) << BIO_RW_FAILFAST) |
+              (((rq->cmd_flags & REQ_RW_SYNC) != 0) << BIO_RW_SYNC) |
+              (((rq->cmd_flags & REQ_RW_META) != 0) << BIO_RW_META));

     if (likely(!bt))
         return;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 9a663c6..0d22fd3 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -13,6 +13,28 @@ enum dma_data_direction {
     DMA_NONE = 3,
 };

+static inline int dma_write_dir(enum dma_data_direction dir)
+{
+    return (dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL);
+}
+
+static inline int dma_uni_dir(enum dma_data_direction dir)
+{
+    return (dir == DMA_TO_DEVICE) || (dir == DMA_FROM_DEVICE) ||
+           (dir == DMA_NONE);
+}
+
+static inline const char *dma_dir_to_string(enum dma_data_direction dir)
+{
+    switch (dir) {
+    case DMA_BIDIRECTIONAL:   return "bidirectional";
+    case DMA_TO_DEVICE:       return "writing";
+    case DMA_FROM_DEVICE:     return "reading";
+    case DMA_NONE:            return "no-data";
+    default:                  return "invalid";
+    }
+}
+
 #define DMA_64BIT_MASK    0xffffffffffffffffULL
 #define DMA_48BIT_MASK    0x0000ffffffffffffULL
 #define DMA_40BIT_MASK    0x000000ffffffffffULL
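
The dma-mapping.h helpers are usable outside the block layer as well: dma_uni_dir() is false only for DMA_BIDIRECTIONAL, dma_write_dir() tells whether data flows toward the device, and dma_dir_to_string() is meant for messages. A trivial sketch (example_check_dir and example_flush_cache are made-up names):

static int example_check_dir(const char *name, struct request *rq)
{
    enum dma_data_direction dir = rq_dma_dir(rq);

    if (!dma_uni_dir(dir)) {
        printk(KERN_WARNING "%s: %s request not supported\n",
               name, dma_dir_to_string(dir));
        return -EOPNOTSUPP;
    }

    if (dma_write_dir(dir))
        example_flush_cache();    /* made-up helper */

    return 0;
}
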
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e88fcbc..c947f71 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -20,7 +20,7 @@ typedef void (elevator_add_req_fn) (request_queue_t *, struct request *);
 typedef int (elevator_queue_empty_fn) (request_queue_t *);
 typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *);
 typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_may_queue_fn) (request_queue_t *, int);
+typedef int (elevator_may_queue_fn) (request_queue_t *, int, int);

 typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
@@ -111,7 +111,7 @@ extern struct request *elv_former_request(request_queue_t *, struct request *);
 extern struct request *elv_latter_request(request_queue_t *, struct request *);
 extern int elv_register_queue(request_queue_t *q);
 extern void elv_unregister_queue(request_queue_t *q);
-extern int elv_may_queue(request_queue_t *, int);
+extern int elv_may_queue(request_queue_t *, int, int);
 extern void elv_completed_request(request_queue_t *, struct request *);
 extern int elv_set_request(request_queue_t *, struct request *, gfp_t);
 extern void elv_put_request(request_queue_t *, struct request *);
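
Finally, blk_rq_init_unqueued_req() is meant to replace the open-coded memset of on-stack requests that never go through a request_queue (the scsi_error.c hunk is one user), so that a zeroed data_dir is not mistaken for DMA_BIDIRECTIONAL. A sketch mirroring that usage, with scmd as in scsi_reset_provider() (presumed typical, not copied from the patch):

    struct request req;    /* ad-hoc request, never queued */

    /* use this instead of memset(&req, 0, sizeof(req)) */
    blk_rq_init_unqueued_req(&req);
    scmd->request = &req;
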
-- 
1.5.0.4.402.g8035



