
Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
  block: add request clone interface (v2)
  floppy: fix hibernation
  ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
  fs/bio.c: add missing __user annotation
  block: prevent possible io_context->refcount overflow
  Add serial number support for virtio_blk, V4a
  block: Add missing bounce_pfn stacking and fix comments
  Revert "block: Fix bounce limit setting in DM"
  cciss: decode unit attention in SCSI error handling code
  cciss: Remove no longer needed sendcmd reject processing code
  cciss: change SCSI error handling routines to work with interrupts enabled.
  cciss: separate error processing and command retrying code in sendcmd_withirq_core()
  cciss: factor out fix target status processing code from sendcmd functions
  cciss: simplify interface of sendcmd() and sendcmd_withirq()
  cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
  cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
  block: needs to set the residual length of a bidi request
  Revert "block: implement blkdev_readpages"
  block: Fix bounce limit setting in DM
  Removed reference to non-existing file Documentation/PCI/PCI-DMA-mapping.txt
  ...

Manually fix conflicts with tracing updates in:
	block/blk-sysfs.c
	drivers/ide/ide-atapi.c
	drivers/ide/ide-cd.c
	drivers/ide/ide-floppy.c
	drivers/ide/ide-tape.c
	include/trace/events/block.h
	kernel/trace/blktrace.c
Linus Torvalds 2009-06-11 10:52:27 -07:00
commit c9059598ea
158 changed files with 3801 additions and 2771 deletions


@ -60,3 +60,62 @@ Description:
Indicates whether the block layer should automatically
generate checksums for write requests bound for
devices that support receiving integrity metadata.
What: /sys/block/<disk>/alignment_offset
Date: April 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Storage devices may report a physical block size that is
bigger than the logical block size (for instance a drive
with 4KB physical sectors exposing 512-byte logical
blocks to the operating system). This parameter
indicates how many bytes the beginning of the device is
offset from the disk's natural alignment.
What: /sys/block/<disk>/<partition>/alignment_offset
Date: April 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Storage devices may report a physical block size that is
bigger than the logical block size (for instance a drive
with 4KB physical sectors exposing 512-byte logical
blocks to the operating system). This parameter
indicates how many bytes the beginning of the partition
is offset from the disk's natural alignment.
What: /sys/block/<disk>/queue/logical_block_size
Date: May 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
This is the smallest unit the storage device can
address. It is typically 512 bytes.
What: /sys/block/<disk>/queue/physical_block_size
Date: May 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
This is the smallest unit the storage device can write
without resorting to read-modify-write operation. It is
usually the same as the logical block size but may be
bigger. One example is SATA drives with 4KB sectors
that expose a 512-byte logical block size to the
operating system.
What: /sys/block/<disk>/queue/minimum_io_size
Date: April 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Storage devices may report a preferred minimum I/O size,
which is the smallest request the device can perform
without incurring a read-modify-write penalty. For disk
drives this is often the physical block size. For RAID
arrays it is often the stripe chunk size.
What: /sys/block/<disk>/queue/optimal_io_size
Date: April 2009
Contact: Martin K. Petersen <martin.petersen@oracle.com>
Description:
Storage devices may report an optimal I/O size, which is
the device's preferred unit of receiving I/O. This is
rarely reported for disk drives. For RAID devices it is
usually the stripe width or the internal block size.
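These queue attributes are exported as plain text files, so the reported topology can be read directly from userspace. A minimal sketch, assuming a disk named sda and reporting failures only as a -1 return (the device name and helper are illustrative, not part of the patch):

/* Hypothetical userspace reader for the new queue topology attributes. */
#include <stdio.h>

static long read_queue_attr(const char *name)
{
	char path[128];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("logical block size:  %ld\n", read_queue_attr("logical_block_size"));
	printf("physical block size: %ld\n", read_queue_attr("physical_block_size"));
	printf("minimum I/O size:    %ld\n", read_queue_attr("minimum_io_size"));
	printf("optimal I/O size:    %ld\n", read_queue_attr("optimal_io_size"));
	return 0;
}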


@ -0,0 +1,33 @@
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/model
Date: March 2009
Kernel Version: 2.6.30
Contact: iss_storagedev@hp.com
Description: Displays the SCSI INQUIRY page 0 model for logical drive
Y of controller X.
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/rev
Date: March 2009
Kernel Version: 2.6.30
Contact: iss_storagedev@hp.com
Description: Displays the SCSI INQUIRY page 0 revision for logical
drive Y of controller X.
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/unique_id
Date: March 2009
Kernel Version: 2.6.30
Contact: iss_storagedev@hp.com
Description: Displays the SCSI INQUIRY page 83 serial number for logical
drive Y of controller X.
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/vendor
Date: March 2009
Kernel Version: 2.6.30
Contact: iss_storagedev@hp.com
Description: Displays the SCSI INQUIRY page 0 vendor for logical drive
Y of controller X.
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/block:cciss!cXdY
Date: March 2009
Kernel Version: 2.6.30
Contact: iss_storagedev@hp.com
Description: A symbolic link to /sys/block/cciss!cXdY


@ -186,7 +186,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
do not have a corresponding kernel virtual address space mapping) and
low-memory pages.
Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion
Note: Please refer to Documentation/DMA-mapping.txt for a discussion
on PCI high mem DMA aspects and mapping of scatter gather lists, and support
for 64 bit PCI.


@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
return ret;
}
struct omap_msg_tx_data {
mbox_msg_t msg;
void *arg;
};
static void omap_msg_tx_end_io(struct request *rq, int error)
{
kfree(rq->special);
__blk_put_request(rq->q, rq);
}
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
{
struct omap_msg_tx_data *tx_data;
struct request *rq;
struct request_queue *q = mbox->txq->queue;
int ret = 0;
tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
if (unlikely(!tx_data))
return -ENOMEM;
rq = blk_get_request(q, WRITE, GFP_ATOMIC);
if (unlikely(!rq)) {
ret = -ENOMEM;
goto fail;
kfree(tx_data);
return -ENOMEM;
}
rq->data = (void *)msg;
blk_insert_request(q, rq, 0, arg);
tx_data->msg = msg;
tx_data->arg = arg;
rq->end_io = omap_msg_tx_end_io;
blk_insert_request(q, rq, 0, tx_data);
schedule_work(&mbox->txq->work);
fail:
return ret;
return 0;
}
EXPORT_SYMBOL(omap_mbox_msg_send);
@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
struct request_queue *q = mbox->txq->queue;
while (1) {
struct omap_msg_tx_data *tx_data;
spin_lock(q->queue_lock);
rq = elv_next_request(q);
rq = blk_fetch_request(q);
spin_unlock(q->queue_lock);
if (!rq)
break;
ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
tx_data = rq->special;
ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
if (ret) {
enable_mbox_irq(mbox, IRQ_TX);
spin_lock(q->queue_lock);
blk_requeue_request(q, rq);
spin_unlock(q->queue_lock);
return;
}
spin_lock(q->queue_lock);
if (__blk_end_request(rq, 0, 0))
BUG();
__blk_end_request_all(rq, 0);
spin_unlock(q->queue_lock);
}
}
@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
rq = elv_next_request(q);
rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
msg = (mbox_msg_t) rq->data;
if (blk_end_request(rq, 0, 0))
BUG();
msg = (mbox_msg_t)rq->special;
blk_end_request_all(rq, 0);
mbox->rxq->callback((void *)msg);
}
}
@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
goto nomem;
msg = mbox_fifo_read(mbox);
rq->data = (void *)msg;
if (unlikely(mbox_seq_test(mbox, msg))) {
pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
mbox->err_notify();
}
blk_insert_request(q, rq, 0, NULL);
blk_insert_request(q, rq, 0, (void *)msg);
if (mbox->ops->type == OMAP_MBOX_TYPE1)
break;
}
@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
rq = elv_next_request(q);
rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
*p = (mbox_msg_t) rq->data;
*p = (mbox_msg_t)rq->special;
if (blk_end_request(rq, 0, 0))
BUG();
blk_end_request_all(rq, 0);
if (unlikely(mbox_seq_test(mbox, *p))) {
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);

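The mailbox conversion above shows the driver-side pattern this series moves to: blk_fetch_request() pulls the next request off the queue and dequeues it in one step, per-request data rides in rq->special, and blk_end_request_all() retires the whole request in a single call. A condensed sketch of that consumer loop, with my_consume() as a hypothetical payload handler:

/* Hedged sketch of the receive loop the mailbox code now uses;
 * my_consume() is a placeholder, not from the patch. */
static void my_queue_work(struct request_queue *q)
{
	struct request *rq;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(q->queue_lock, flags);
		rq = blk_fetch_request(q);	/* peek + dequeue + start */
		spin_unlock_irqrestore(q->queue_lock, flags);
		if (!rq)
			break;

		my_consume((mbox_msg_t)rq->special);	/* payload stashed at insert time */

		/* finish the whole request; no partial-completion bookkeeping needed */
		blk_end_request_all(rq, 0);
	}
}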

@ -250,7 +250,7 @@ axon_ram_probe(struct of_device *device, const struct of_device_id *device_id)
set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT);
blk_queue_make_request(bank->disk->queue, axon_ram_make_request);
blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE);
add_disk(bank->disk);
bank->irq_id = irq_of_parse_and_map(device->node, 0);


@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
static void ubd_end_request(struct request *req, int bytes, int error)
{
blk_end_request(req, error, bytes);
}
/* Callable only from interrupt context - otherwise you need to do
* spin_lock_irq()/spin_lock_irqsave() */
static inline void ubd_finish(struct request *req, int bytes)
{
if(bytes < 0){
ubd_end_request(req, 0, -EIO);
return;
}
ubd_end_request(req, bytes, 0);
}
static LIST_HEAD(restart);
/* XXX - move this inside ubd_intr. */
@ -475,7 +458,6 @@ static LIST_HEAD(restart);
static void ubd_handler(void)
{
struct io_thread_req *req;
struct request *rq;
struct ubd *ubd;
struct list_head *list, *next_ele;
unsigned long flags;
@ -492,10 +474,7 @@ static void ubd_handler(void)
return;
}
rq = req->req;
rq->nr_sectors -= req->length >> 9;
if(rq->nr_sectors == 0)
ubd_finish(rq, rq->hard_nr_sectors << 9);
blk_end_request(req->req, 0, req->length);
kfree(req);
}
reactivate_fd(thread_fd, UBD_IRQ);
@ -1243,27 +1222,26 @@ static void do_ubd_request(struct request_queue *q)
{
struct io_thread_req *io_req;
struct request *req;
int n, last_sectors;
sector_t sector;
int n;
while(1){
struct ubd *dev = q->queuedata;
if(dev->end_sg == 0){
struct request *req = elv_next_request(q);
struct request *req = blk_fetch_request(q);
if(req == NULL)
return;
dev->request = req;
blkdev_dequeue_request(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
req = dev->request;
last_sectors = 0;
sector = blk_rq_pos(req);
while(dev->start_sg < dev->end_sg){
struct scatterlist *sg = &dev->sg[dev->start_sg];
req->sector += last_sectors;
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if(io_req == NULL){
@ -1272,10 +1250,10 @@ static void do_ubd_request(struct request_queue *q)
return;
}
prepare_request(req, io_req,
(unsigned long long) req->sector << 9,
(unsigned long long)sector << 9,
sg->offset, sg->length, sg_page(sg));
last_sectors = sg->length >> 9;
sector += sg->length >> 9;
n = os_write_file(thread_fd, &io_req,
sizeof(struct io_thread_req *));
if(n != sizeof(struct io_thread_req *)){

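Where ubd previously tracked rq->nr_sectors by hand, the consolidated completion API now does the bookkeeping: blk_end_request(rq, error, bytes) retires the given number of bytes and reports whether anything remains. A hedged illustration (helper name and messages are made up):

/* Illustrative partial-completion helper; not from the patch. */
static void my_complete_chunk(struct request *rq, unsigned int bytes)
{
	if (blk_end_request(rq, 0, bytes))
		pr_debug("request still has data pending\n");
	else
		pr_debug("request fully completed\n");
}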

@ -26,6 +26,7 @@ if BLOCK
config LBD
bool "Support for large block devices and files"
depends on !64BIT
default y
help
Enable block devices or files of size 2TB and larger.
@ -38,11 +39,13 @@ config LBD
The ext4 filesystem requires that this feature be enabled in
order to support filesystems that have the huge_file feature
enabled. Otherwise, it will refuse to mount any filesystems
that use the huge_file feature, which is enabled by default
by mke2fs.ext4. The GFS2 filesystem also requires this feature.
enabled. Otherwise, it will refuse to mount in the read-write
mode any filesystems that use the huge_file feature, which is
enabled by default by mke2fs.ext4.
If unsure, say N.
The GFS2 filesystem also requires this feature.
If unsure, say Y.
config BLK_DEV_BSG
bool "Block layer SG support v4 (EXPERIMENTAL)"

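The reason CONFIG_LBD matters is that it controls the width of sector_t (and blkcnt_t) on 32-bit kernels; without it, a block device or file cannot exceed 2TB. Roughly, from include/linux/types.h of this era (shown for orientation, check the tree for the exact form):

#ifdef CONFIG_LBD
typedef u64 sector_t;		/* 64-bit sector numbers: devices >2TB addressable */
typedef u64 blkcnt_t;
#else
typedef unsigned long sector_t;	/* 32 bits on !64BIT builds: 2TB ceiling */
typedef unsigned long blkcnt_t;
#endif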

@ -306,8 +306,8 @@ as_choose_req(struct as_data *ad, struct request *rq1, struct request *rq2)
data_dir = rq_is_sync(rq1);
last = ad->last_sector[data_dir];
s1 = rq1->sector;
s2 = rq2->sector;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
BUG_ON(data_dir != rq_is_sync(rq2));
@ -566,13 +566,15 @@ static void as_update_iohist(struct as_data *ad, struct as_io_context *aic,
as_update_thinktime(ad, aic, thinktime);
/* Calculate read -> read seek distance */
if (aic->last_request_pos < rq->sector)
seek_dist = rq->sector - aic->last_request_pos;
if (aic->last_request_pos < blk_rq_pos(rq))
seek_dist = blk_rq_pos(rq) -
aic->last_request_pos;
else
seek_dist = aic->last_request_pos - rq->sector;
seek_dist = aic->last_request_pos -
blk_rq_pos(rq);
as_update_seekdist(ad, aic, seek_dist);
}
aic->last_request_pos = rq->sector + rq->nr_sectors;
aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
set_bit(AS_TASK_IOSTARTED, &aic->state);
spin_unlock(&aic->lock);
}
@ -587,7 +589,7 @@ static int as_close_req(struct as_data *ad, struct as_io_context *aic,
{
unsigned long delay; /* jiffies */
sector_t last = ad->last_sector[ad->batch_data_dir];
sector_t next = rq->sector;
sector_t next = blk_rq_pos(rq);
sector_t delta; /* acceptable close offset (in sectors) */
sector_t s;
@ -981,7 +983,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct request *rq)
* This has to be set in order to be correctly updated by
* as_find_next_rq
*/
ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (data_dir == BLK_RW_SYNC) {
struct io_context *ioc = RQ_IOC(rq);
@ -1312,12 +1314,8 @@ static void as_merged_requests(struct request_queue *q, struct request *req,
static void as_work_handler(struct work_struct *work)
{
struct as_data *ad = container_of(work, struct as_data, antic_work);
struct request_queue *q = ad->q;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queueing(q);
spin_unlock_irqrestore(q->queue_lock, flags);
blk_run_queue(ad->q);
}
static int as_may_queue(struct request_queue *q, int rw)

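The rq->sector and rq->nr_sectors accesses replaced above (and in the other schedulers below) move to accessor helpers so the block core owns the request-position bookkeeping. Their 2.6.31 shape is roughly the following (shown for orientation; see include/linux/blkdev.h for the authoritative definitions):

static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;		/* current sector, maintained by the core */
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;		/* bytes left in the whole request */
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;	/* the same, in 512-byte sectors */
}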

@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
*/
q->ordseq = 0;
rq = q->orig_bar_rq;
if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
BUG();
__blk_end_request_all(rq, q->orderr);
return true;
}
@ -166,7 +163,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
* For an empty barrier, there's no actual BAR request, which
* in turn makes POSTFLUSH unnecessary. Mask them off.
*/
if (!rq->hard_nr_sectors) {
if (!blk_rq_sectors(rq)) {
q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
QUEUE_ORDERED_DO_POSTFLUSH);
/*
@ -183,7 +180,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
}
/* stash away the original request */
elv_dequeue_request(q, rq);
blk_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = NULL;
@ -221,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
} else
skip |= QUEUE_ORDSEQ_PREFLUSH;
if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
rq = NULL;
else
skip |= QUEUE_ORDSEQ_DRAIN;
@ -251,10 +248,8 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
* Queue ordering not supported. Terminate
* with prejudice.
*/
elv_dequeue_request(q, rq);
if (__blk_end_request(rq, -EOPNOTSUPP,
blk_rq_bytes(rq)))
BUG();
blk_dequeue_request(rq);
__blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
}
@ -329,7 +324,7 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
/*
* The driver must store the error location in ->bi_sector, if
* it supports it. For non-stacked drivers, this should be copied
* from rq->sector.
* from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
@ -393,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
bio->bi_sector = sector;
if (nr_sects > q->max_hw_sectors) {
bio->bi_size = q->max_hw_sectors << 9;
nr_sects -= q->max_hw_sectors;
sector += q->max_hw_sectors;
if (nr_sects > queue_max_hw_sectors(q)) {
bio->bi_size = queue_max_hw_sectors(q) << 9;
nr_sects -= queue_max_hw_sectors(q);
sector += queue_max_hw_sectors(q);
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;

File diff suppressed because it is too large.


@ -51,7 +51,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
rq->rq_disk = bd_disk;
rq->cmd_flags |= REQ_NOMERGE;
rq->end_io = done;
WARN_ON(irqs_disabled());
spin_lock_irq(q->queue_lock);


@ -340,7 +340,7 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
kobject_uevent(&bi->kobj, KOBJ_ADD);
bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE;
bi->sector_size = disk->queue->hardsect_size;
bi->sector_size = queue_logical_block_size(disk->queue);
disk->integrity = bi;
} else
bi = disk->integrity;


@ -35,9 +35,9 @@ int put_io_context(struct io_context *ioc)
if (ioc == NULL)
return 1;
BUG_ON(atomic_read(&ioc->refcount) == 0);
BUG_ON(atomic_long_read(&ioc->refcount) == 0);
if (atomic_dec_and_test(&ioc->refcount)) {
if (atomic_long_dec_and_test(&ioc->refcount)) {
rcu_read_lock();
if (ioc->aic && ioc->aic->dtor)
ioc->aic->dtor(ioc->aic);
@ -90,7 +90,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
if (ret) {
atomic_set(&ret->refcount, 1);
atomic_long_set(&ret->refcount, 1);
atomic_set(&ret->nr_tasks, 1);
spin_lock_init(&ret->lock);
ret->ioprio_changed = 0;
@ -151,7 +151,7 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
ret = current_io_context(gfp_flags, node);
if (unlikely(!ret))
break;
} while (!atomic_inc_not_zero(&ret->refcount));
} while (!atomic_long_inc_not_zero(&ret->refcount));
return ret;
}
@ -163,8 +163,8 @@ void copy_io_context(struct io_context **pdst, struct io_context **psrc)
struct io_context *dst = *pdst;
if (src) {
BUG_ON(atomic_read(&src->refcount) == 0);
atomic_inc(&src->refcount);
BUG_ON(atomic_long_read(&src->refcount) == 0);
atomic_long_inc(&src->refcount);
put_io_context(dst);
*pdst = src;
}
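Switching the io_context refcount from atomic_t to atomic_long_t removes the 32-bit overflow risk on 64-bit machines while keeping the usual get/put discipline. A generic sketch of that pattern with placeholder names (my_ctx is not a kernel structure):

/* Sketch of the widened refcount pattern; names are hypothetical. */
struct my_ctx {
	atomic_long_t refcount;
};

static struct my_ctx *my_ctx_get(struct my_ctx *ctx)
{
	/* only take a reference if the object is still live */
	if (ctx && atomic_long_inc_not_zero(&ctx->refcount))
		return ctx;
	return NULL;
}

static void my_ctx_put(struct my_ctx *ctx)
{
	if (ctx && atomic_long_dec_and_test(&ctx->refcount))
		kfree(ctx);	/* last reference dropped */
}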


@ -20,11 +20,10 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
rq->data_len += bio->bi_size;
rq->__data_len += bio->bi_size;
}
return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
@ -116,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
struct bio *bio = NULL;
int ret;
if (len > (q->max_hw_sectors << 9))
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
@ -156,7 +155,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
rq->buffer = rq->data = NULL;
rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
@ -235,7 +234,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
@ -282,7 +281,8 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
*
* Description:
* Data will be mapped directly if possible. Otherwise a bounce
* buffer is used.
* buffer is used. Can be called multiple times to append multiple
* buffers.
*/
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
@ -290,8 +290,9 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
int reading = rq_data_dir(rq) == READ;
int do_copy = 0;
struct bio *bio;
int ret;
if (len > (q->max_hw_sectors << 9))
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
@ -311,9 +312,15 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
blk_rq_bio_prep(q, rq, bio);
ret = blk_rq_append_bio(q, rq, bio);
if (unlikely(ret)) {
/* request is too big */
bio_put(bio);
return ret;
}
blk_queue_bounce(q, &rq->bio);
rq->buffer = rq->data = NULL;
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
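Because blk_rq_map_kern() now appends a bio per call, a caller can assemble a request from several kernel buffers before issuing it. A schematic, hedged example (the names are hypothetical, and a real driver would also fill in the command payload, sense buffer and timeout):

/* Hypothetical caller: map two kernel buffers into one PC request. */
static int my_send_two_buffers(struct request_queue *q, struct gendisk *disk,
			       void *hdr, unsigned int hdr_len,
			       void *payload, unsigned int payload_len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_kern(q, rq, hdr, hdr_len, GFP_KERNEL);
	if (!ret)
		ret = blk_rq_map_kern(q, rq, payload, payload_len, GFP_KERNEL);
	if (ret) {
		blk_put_request(rq);
		return ret;
	}

	blk_execute_rq(q, disk, rq, 0);	/* issue and wait for completion */
	ret = rq->errors ? -EIO : 0;
	blk_put_request(rq);
	return ret;
}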


@ -9,35 +9,6 @@
#include "blk.h"
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq) || blk_discard_rq(rq)) {
rq->hard_sector += nsect;
rq->hard_nr_sectors -= nsect;
/*
* Move the I/O submission pointers ahead if required.
*/
if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
(rq->sector <= rq->hard_sector)) {
rq->sector = rq->hard_sector;
rq->nr_sectors = rq->hard_nr_sectors;
rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
rq->current_nr_sectors = rq->hard_cur_sectors;
rq->buffer = bio_data(rq->bio);
}
/*
* if total number of sectors is less than the first segment
* size, something has gone terribly wrong
*/
if (rq->nr_sectors < rq->current_nr_sectors) {
printk(KERN_ERR "blk: request botched\n");
rq->nr_sectors = rq->current_nr_sectors;
}
}
}
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio)
{
@ -61,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
* never considered part of another segment, since that
* might change with the bounce page.
*/
high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
if (high || highprv)
goto new_segment;
if (cluster) {
if (seg_size + bv->bv_len > q->max_segment_size)
if (seg_size + bv->bv_len
> queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
@ -120,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
q->max_segment_size)
queue_max_segment_size(q))
return 0;
if (!bio_has_data(bio))
@ -163,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
if (sg->length + nbytes > q->max_segment_size)
if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@ -199,8 +171,9 @@ new_segment:
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(rq->data_len & q->dma_pad_mask)) {
unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len;
rq->extra_len += pad_len;
@ -233,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
{
int nr_phys_segs = bio_phys_segments(q, bio);
if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
|| req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -255,11 +228,11 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = q->max_sectors;
max_sectors = queue_max_sectors(q);
if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -279,12 +252,12 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
max_sectors = q->max_hw_sectors;
max_sectors = queue_max_hw_sectors(q);
else
max_sectors = q->max_sectors;
max_sectors = queue_max_sectors(q);
if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@ -315,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@ -327,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
total_phys_segments--;
}
if (total_phys_segments > q->max_phys_segments)
if (total_phys_segments > queue_max_phys_segments(q))
return 0;
if (total_phys_segments > q->max_hw_segments)
if (total_phys_segments > queue_max_hw_segments(q))
return 0;
/* Merge is OK... */
@ -345,7 +318,7 @@ static void blk_account_io_merge(struct request *req)
int cpu;
cpu = part_stat_lock();
part = disk_map_sector_rcu(req->rq_disk, req->sector);
part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
part_round_stats(cpu, part);
part_dec_in_flight(part);
@ -366,7 +339,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
/*
* not contiguous
*/
if (req->sector + req->nr_sectors != next->sector)
if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
return 0;
if (rq_data_dir(req) != rq_data_dir(next)
@ -398,7 +371,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = next->bio;
req->biotail = next->biotail;
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
req->__data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next);


@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
blk_queue_logical_block_size(q, 512);
blk_queue_dma_alignment(q, 511);
blk_queue_congestion_threshold(q);
q->nr_batching = BLK_BATCH_REQ;
@ -179,16 +179,16 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
*/
if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
q->limits.bounce_pfn = max_low_pfn;
#else
if (b_pfn < blk_max_low_pfn)
dma = 1;
q->bounce_pfn = b_pfn;
q->limits.bounce_pfn = b_pfn;
#endif
if (dma) {
init_emergency_isa_pool();
q->bounce_gfp = GFP_NOIO | GFP_DMA;
q->bounce_pfn = b_pfn;
q->limits.bounce_pfn = b_pfn;
}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
@ -211,14 +211,23 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
}
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->max_hw_sectors = q->max_sectors = max_sectors;
q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
else {
q->max_sectors = BLK_DEF_MAX_SECTORS;
q->max_hw_sectors = max_sectors;
q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
q->limits.max_hw_sectors = max_sectors;
}
}
EXPORT_SYMBOL(blk_queue_max_sectors);
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
{
if (BLK_DEF_MAX_SECTORS > max_sectors)
q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
else
q->limits.max_hw_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
@ -238,7 +247,7 @@ void blk_queue_max_phys_segments(struct request_queue *q,
__func__, max_segments);
}
q->max_phys_segments = max_segments;
q->limits.max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);
@ -262,7 +271,7 @@ void blk_queue_max_hw_segments(struct request_queue *q,
__func__, max_segments);
}
q->max_hw_segments = max_segments;
q->limits.max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
@ -283,26 +292,110 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
__func__, max_size);
}
q->max_segment_size = max_size;
q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
* blk_queue_hardsect_size - set hardware sector size for the queue
* blk_queue_logical_block_size - set logical block size for the queue
* @q: the request queue for the device
* @size: the hardware sector size, in bytes
* @size: the logical block size, in bytes
*
* Description:
* This should typically be set to the lowest possible sector size
* that the hardware can operate on (possible without reverting to
* even internal read-modify-write operations). Usually the default
* of 512 covers most hardware.
* This should be set to the lowest possible block size that the
* storage device can address. The default of 512 covers most
* hardware.
**/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
q->hardsect_size = size;
q->limits.logical_block_size = size;
if (q->limits.physical_block_size < size)
q->limits.physical_block_size = size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_logical_block_size);
/**
* blk_queue_physical_block_size - set physical block size for the queue
* @q: the request queue for the device
* @size: the physical block size, in bytes
*
* Description:
* This should be set to the lowest possible sector size that the
* hardware can operate on without reverting to read-modify-write
* operations.
*/
void blk_queue_physical_block_size(struct request_queue *q, unsigned short size)
{
q->limits.physical_block_size = size;
if (q->limits.physical_block_size < q->limits.logical_block_size)
q->limits.physical_block_size = q->limits.logical_block_size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
* blk_queue_alignment_offset - set physical block alignment offset
* @q: the request queue for the device
* @alignment: alignment offset in bytes
*
* Description:
* Some devices are naturally misaligned to compensate for things like
* the legacy DOS partition table 63-sector offset. Low-level drivers
* should call this function for devices whose first sector is not
* naturally aligned.
*/
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
q->limits.alignment_offset =
offset & (q->limits.physical_block_size - 1);
q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
/**
* blk_queue_io_min - set minimum request size for the queue
* @q: the request queue for the device
* @io_min: smallest I/O size in bytes
*
* Description:
* Some devices have an internal block size bigger than the reported
* hardware sector size. This function can be used to signal the
* smallest I/O the device can perform without incurring a performance
* penalty.
*/
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
q->limits.io_min = min;
if (q->limits.io_min < q->limits.logical_block_size)
q->limits.io_min = q->limits.logical_block_size;
if (q->limits.io_min < q->limits.physical_block_size)
q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
* blk_queue_io_opt - set optimal request size for the queue
* @q: the request queue for the device
* @io_opt: optimal request size in bytes
*
* Description:
* Drivers can call this function to set the preferred I/O request
* size for devices that report such a value.
*/
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
q->limits.io_opt = opt;
}
EXPORT_SYMBOL(blk_queue_io_opt);
/*
* Returns the minimum that is _not_ zero, unless both are zero.
@ -317,14 +410,27 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
/* zero is "infinity" */
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
t->limits.max_sectors = min_not_zero(queue_max_sectors(t),
queue_max_sectors(b));
t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t),
queue_max_hw_sectors(b));
t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t),
queue_segment_boundary(b));
t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t),
queue_max_phys_segments(b));
t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t),
queue_max_hw_segments(b));
t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t),
queue_max_segment_size(b));
t->limits.logical_block_size = max(queue_logical_block_size(t),
queue_logical_block_size(b));
t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
if (!t->queue_lock)
WARN_ON_ONCE(1);
else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
@ -336,6 +442,109 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
}
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top)
* @b: the underlying queue limits (bottom)
* @offset: offset to beginning of data within component device
*
* Description:
* Merges two queue_limit structs. Returns 0 if alignment didn't
* change. Returns -1 if adding the bottom device caused
* misalignment.
*/
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset)
{
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
b->seg_boundary_mask);
t->max_phys_segments = min_not_zero(t->max_phys_segments,
b->max_phys_segments);
t->max_hw_segments = min_not_zero(t->max_hw_segments,
b->max_hw_segments);
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
t->logical_block_size = max(t->logical_block_size,
b->logical_block_size);
t->physical_block_size = max(t->physical_block_size,
b->physical_block_size);
t->io_min = max(t->io_min, b->io_min);
t->no_cluster |= b->no_cluster;
/* Bottom device offset aligned? */
if (offset &&
(offset & (b->physical_block_size - 1)) != b->alignment_offset) {
t->misaligned = 1;
return -1;
}
/* If top has no alignment offset, inherit from bottom */
if (!t->alignment_offset)
t->alignment_offset =
b->alignment_offset & (b->physical_block_size - 1);
/* Top device aligned on logical block boundary? */
if (t->alignment_offset & (t->logical_block_size - 1)) {
t->misaligned = 1;
return -1;
}
return 0;
}
EXPORT_SYMBOL(blk_stack_limits);
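/*
 * Worked example of the misalignment test above, with assumed numbers:
 * bottom device physical_block_size = 4096 and alignment_offset = 0, and a
 * partition that starts at the legacy sector 63, i.e. offset = 63 * 512 =
 * 32256 bytes.  Then
 *
 *	offset & (physical_block_size - 1) = 32256 & 4095 = 3584,
 *
 * which differs from the bottom device's alignment_offset of 0, so the
 * stacked device is marked misaligned and blk_stack_limits() returns -1.
 * Starting the partition at sector 64 (32768 bytes, a multiple of 4096)
 * would satisfy the check.
 */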
/**
* disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top)
* @bdev: the underlying block device (bottom)
* @offset: offset to beginning of data within component device
*
* Description:
* Merges the limits for two queues. Returns 0 if alignment
* didn't change. Returns -1 if adding the bottom device caused
* misalignment.
*/
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset)
{
struct request_queue *t = disk->queue;
struct request_queue *b = bdev_get_queue(bdev);
offset += get_start_sect(bdev) << 9;
if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top);
bdevname(bdev, bottom);
printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
top, bottom);
}
if (!t->queue_lock)
WARN_ON_ONCE(1);
else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
unsigned long flags;
spin_lock_irqsave(t->queue_lock, flags);
if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
spin_unlock_irqrestore(t->queue_lock, flags);
}
}
EXPORT_SYMBOL(disk_stack_limits);
/**
* blk_queue_dma_pad - set pad mask
* @q: the request queue for the device
@ -396,11 +605,11 @@ int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
--q->max_hw_segments;
--q->max_phys_segments;
blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
@ -422,7 +631,7 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
__func__, mask);
}
q->seg_boundary_mask = mask;
q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
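Taken together, the new setters give a low-level driver a single place to describe its I/O topology at probe time. An illustrative sketch (the values and the helper name are invented, not taken from any driver in this merge):

/* Illustrative probe-time topology setup using the new helpers. */
static void my_disk_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* no read-modify-write below this */
	blk_queue_alignment_offset(q, 0);	/* first sector naturally aligned */
	blk_queue_io_min(q, 4096);		/* smallest penalty-free request */
	blk_queue_io_opt(q, 64 * 1024);		/* e.g. a RAID chunk size */
}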


@ -95,21 +95,36 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
int max_sectors_kb = q->max_sectors >> 1;
int max_sectors_kb = queue_max_sectors(q) >> 1;
return queue_var_show(max_sectors_kb, (page));
}
static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page)
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
return queue_var_show(q->hardsect_size, page);
return queue_var_show(queue_logical_block_size(q), page);
}
static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_physical_block_size(q), page);
}
static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_io_min(q), page);
}
static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
return queue_var_show(queue_io_opt(q), page);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = q->max_hw_sectors >> 1,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
@ -117,7 +132,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
return -EINVAL;
spin_lock_irq(q->queue_lock);
q->max_sectors = max_sectors_kb << 1;
blk_queue_max_sectors(q, max_sectors_kb << 1);
spin_unlock_irq(q->queue_lock);
return ret;
@ -125,7 +140,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
int max_hw_sectors_kb = q->max_hw_sectors >> 1;
int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
return queue_var_show(max_hw_sectors_kb, (page));
}
@ -249,7 +264,27 @@ static struct queue_sysfs_entry queue_iosched_entry = {
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
.show = queue_hw_sector_size_show,
.show = queue_logical_block_size_show,
};
static struct queue_sysfs_entry queue_logical_block_size_entry = {
.attr = {.name = "logical_block_size", .mode = S_IRUGO },
.show = queue_logical_block_size_show,
};
static struct queue_sysfs_entry queue_physical_block_size_entry = {
.attr = {.name = "physical_block_size", .mode = S_IRUGO },
.show = queue_physical_block_size_show,
};
static struct queue_sysfs_entry queue_io_min_entry = {
.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
.show = queue_io_min_show,
};
static struct queue_sysfs_entry queue_io_opt_entry = {
.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
.show = queue_io_opt_show,
};
static struct queue_sysfs_entry queue_nonrot_entry = {
@ -283,6 +318,10 @@ static struct attribute *default_attrs[] = {
&queue_max_sectors_entry.attr,
&queue_iosched_entry.attr,
&queue_hw_sector_size_entry.attr,
&queue_logical_block_size_entry.attr,
&queue_physical_block_size_entry.attr,
&queue_io_min_entry.attr,
&queue_io_opt_entry.attr,
&queue_nonrot_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
@ -394,16 +433,15 @@ int blk_register_queue(struct gendisk *disk)
if (ret)
return ret;
if (!q->request_fn)
return 0;
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj),
"%s", "queue");
ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
if (ret < 0)
return ret;
kobject_uevent(&q->kobj, KOBJ_ADD);
if (!q->request_fn)
return 0;
ret = elv_register_queue(q);
if (ret) {
kobject_uevent(&q->kobj, KOBJ_REMOVE);


@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
struct blk_queue_tag *bqt = q->queue_tags;
unsigned max_depth, offset;
unsigned max_depth;
int tag;
if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
* to starve sync IO on behalf of flooding async IO.
*/
max_depth = bqt->max_depth;
if (rq_is_sync(rq))
offset = 0;
else
offset = max_depth >> 2;
if (!rq_is_sync(rq) && max_depth > 1) {
max_depth -= 2;
if (!max_depth)
max_depth = 1;
if (q->in_flight[0] > max_depth)
return 1;
}
do {
tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
tag = find_first_zero_bit(bqt->tag_map, max_depth);
if (tag >= max_depth)
return 1;
@ -374,7 +377,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
blkdev_dequeue_request(rq);
blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
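The reworked tag allocation holds a couple of tags back from asynchronous requests so that sync I/O cannot be starved by a flood of async writes; the worked cases below (tag depths assumed) show how the clamp behaves.

/*
 * Rough effect of the async clamp above:
 *
 *   bqt->max_depth = 31: async requests are refused a new tag once more
 *                        than 29 async requests are already in flight,
 *                        keeping a couple of tags free for sync I/O;
 *   bqt->max_depth = 2:  2 - 2 = 0 is clamped back to 1, so async I/O is
 *                        refused once more than one request is in flight;
 *   bqt->max_depth = 1:  the "max_depth > 1" guard skips the clamp and the
 *                        lone tag remains usable by async and sync alike.
 */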


@ -122,10 +122,8 @@ void blk_rq_timed_out_timer(unsigned long data)
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
} else {
if (!next || time_after(next, rq->deadline))
next = rq->deadline;
}
} else if (!next || time_after(next, rq->deadline))
next = rq->deadline;
}
/*
@ -176,16 +174,14 @@ void blk_add_timer(struct request *req)
BUG_ON(!list_empty(&req->timeout_list));
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
if (req->timeout)
req->deadline = jiffies + req->timeout;
else {
req->deadline = jiffies + q->rq_timeout;
/*
* Some LLDs, like scsi, peek at the timeout to prevent
* a command from being retried forever.
*/
/*
* Some LLDs, like scsi, peek at the timeout to prevent a
* command from being retried forever.
*/
if (!req->timeout)
req->timeout = q->rq_timeout;
}
req->deadline = jiffies + req->timeout;
list_add_tail(&req->timeout_list, &q->timeout_list);
/*


@ -13,6 +13,9 @@ extern struct kobj_type blk_queue_ktype;
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
@ -43,6 +46,43 @@ static inline void blk_clear_rq_complete(struct request *rq)
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
/*
* Internal elevator interface
*/
#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
while (1) {
while (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
if (blk_do_ordered(q, &rq))
return rq;
}
if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
return NULL;
}
}
static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->ops->elevator_activate_req_fn)
e->ops->elevator_activate_req_fn(q, rq);
}
static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->ops->elevator_deactivate_req_fn)
e->ops->elevator_deactivate_req_fn(q, rq);
}
#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
@ -64,7 +104,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
@ -112,9 +151,17 @@ static inline int blk_cpu_to_group(int cpu)
#endif
}
/*
* Contribute to IO statistics IFF:
*
* a) it's attached to a gendisk, and
* b) the queue had IO stats enabled when this request was started, and
* c) it's a file system request or a discard request
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk && blk_rq_io_stat(rq);
return rq->rq_disk && blk_rq_io_stat(rq) &&
(blk_fs_request(rq) || blk_discard_rq(rq));
}
#endif


@ -446,15 +446,15 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
hdr->dout_resid = rq->data_len;
hdr->din_resid = rq->next_rq->data_len;
hdr->dout_resid = rq->resid_len;
hdr->din_resid = rq->next_rq->resid_len;
blk_rq_unmap_user(bidi_bio);
rq->next_rq->bio = NULL;
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
hdr->din_resid = rq->data_len;
hdr->din_resid = rq->resid_len;
else
hdr->dout_resid = rq->data_len;
hdr->dout_resid = rq->resid_len;
/*
* If the request generated a negative error number, return it


@ -349,8 +349,8 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
return rq2;
s1 = rq1->sector;
s2 = rq2->sector;
s1 = blk_rq_pos(rq1);
s2 = blk_rq_pos(rq2);
last = cfqd->last_position;
@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
* Sort strictly based on sector. Smallest to the left,
* largest to the right.
*/
if (sector > cfqq->next_rq->sector)
if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right;
else if (sector < cfqq->next_rq->sector)
else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left;
else
break;
@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector,
&parent, &p);
__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root);
@ -760,7 +760,7 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
cfqd->rq_in_driver);
cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}
static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
@ -949,10 +949,10 @@ static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
struct request *rq)
{
if (rq->sector >= cfqd->last_position)
return rq->sector - cfqd->last_position;
if (blk_rq_pos(rq) >= cfqd->last_position)
return blk_rq_pos(rq) - cfqd->last_position;
else
return cfqd->last_position - rq->sector;
return cfqd->last_position - blk_rq_pos(rq);
}
#define CIC_SEEK_THR 8 * 1024
@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
if (cfq_rq_close(cfqd, __cfqq->next_rq))
return __cfqq;
if (__cfqq->next_rq->sector < sector)
if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node);
else
node = rb_prev(&__cfqq->p_node);
@ -1282,7 +1282,7 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);
atomic_inc(&cic->ioc->refcount);
atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
}
@ -1918,10 +1918,10 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
if (!cic->last_request_pos)
sdist = 0;
else if (cic->last_request_pos < rq->sector)
sdist = rq->sector - cic->last_request_pos;
else if (cic->last_request_pos < blk_rq_pos(rq))
sdist = blk_rq_pos(rq) - cic->last_request_pos;
else
sdist = cic->last_request_pos - rq->sector;
sdist = cic->last_request_pos - blk_rq_pos(rq);
/*
* Don't allow the seek distance to get too large from the
@ -2071,7 +2071,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_update_io_seektime(cfqd, cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_request_pos = rq->sector + rq->nr_sectors;
cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
if (cfqq == cfqd->active_queue) {
/*
@ -2088,7 +2088,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
cfqd->busy_queues > 1) {
del_timer(&cfqd->idle_slice_timer);
blk_start_queueing(cfqd->queue);
__blk_run_queue(cfqd->queue);
}
cfq_mark_cfqq_must_dispatch(cfqq);
}
@ -2100,7 +2100,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* this new queue is RT and the current one is BE
*/
cfq_preempt_queue(cfqd, cfqq);
blk_start_queueing(cfqd->queue);
__blk_run_queue(cfqd->queue);
}
}
@ -2345,7 +2345,7 @@ static void cfq_kick_queue(struct work_struct *work)
struct request_queue *q = cfqd->queue;
spin_lock_irq(q->queue_lock);
blk_start_queueing(q);
__blk_run_queue(cfqd->queue);
spin_unlock_irq(q->queue_lock);
}


@ -763,10 +763,10 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
return compat_put_int(arg, block_size(bdev));
case BLKSSZGET: /* get block device hardware sector size */
return compat_put_int(arg, bdev_hardsect_size(bdev));
return compat_put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
bdev_get_queue(bdev)->max_sectors);
queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))


@ -138,7 +138,7 @@ deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
if (__rq) {
BUG_ON(sector != __rq->sector);
BUG_ON(sector != blk_rq_pos(__rq));
if (elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;


@ -51,8 +51,7 @@ static const int elv_hash_shift = 6;
#define ELV_HASH_FN(sec) \
(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES (1 << elv_hash_shift)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
* Query io scheduler to see if the current process issuing bio may be
@ -116,9 +115,9 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
* we can merge and sequence is ok, check if it's possible
*/
if (elv_rq_merge_ok(__rq, bio)) {
if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
ret = ELEVATOR_BACK_MERGE;
else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
ret = ELEVATOR_FRONT_MERGE;
}
@ -306,22 +305,6 @@ void elevator_exit(struct elevator_queue *e)
}
EXPORT_SYMBOL(elevator_exit);
static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->ops->elevator_activate_req_fn)
e->ops->elevator_activate_req_fn(q, rq);
}
static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e->ops->elevator_deactivate_req_fn)
e->ops->elevator_deactivate_req_fn(q, rq);
}
static inline void __elv_rqhash_del(struct request *rq)
{
hlist_del_init(&rq->hash);
@ -383,9 +366,9 @@ struct request *elv_rb_add(struct rb_root *root, struct request *rq)
parent = *p;
__rq = rb_entry(parent, struct request, rb_node);
if (rq->sector < __rq->sector)
if (blk_rq_pos(rq) < blk_rq_pos(__rq))
p = &(*p)->rb_left;
else if (rq->sector > __rq->sector)
else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
p = &(*p)->rb_right;
else
return __rq;
@ -413,9 +396,9 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
while (n) {
rq = rb_entry(n, struct request, rb_node);
if (sector < rq->sector)
if (sector < blk_rq_pos(rq))
n = n->rb_left;
else if (sector > rq->sector)
else if (sector > blk_rq_pos(rq))
n = n->rb_right;
else
return rq;
@ -454,14 +437,14 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
break;
if (pos->cmd_flags & stop_flags)
break;
if (rq->sector >= boundary) {
if (pos->sector < boundary)
if (blk_rq_pos(rq) >= boundary) {
if (blk_rq_pos(pos) < boundary)
continue;
} else {
if (pos->sector >= boundary)
if (blk_rq_pos(pos) >= boundary)
break;
}
if (rq->sector >= pos->sector)
if (blk_rq_pos(rq) >= blk_rq_pos(pos))
break;
}
@ -559,7 +542,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
* in_flight count again
*/
if (blk_account_rq(rq)) {
q->in_flight--;
q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq))
elv_deactivate_rq(q, rq);
}
@ -588,6 +571,9 @@ void elv_drain_elevator(struct request_queue *q)
*/
void elv_quiesce_start(struct request_queue *q)
{
if (!q->elevator)
return;
queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
/*
@ -595,7 +581,7 @@ void elv_quiesce_start(struct request_queue *q)
*/
elv_drain_elevator(q);
while (q->rq.elvpriv) {
blk_start_queueing(q);
__blk_run_queue(q);
spin_unlock_irq(q->queue_lock);
msleep(10);
spin_lock_irq(q->queue_lock);
@ -639,8 +625,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
* with anything. There's no point in delaying queue
* processing.
*/
blk_remove_plug(q);
blk_start_queueing(q);
__blk_run_queue(q);
break;
case ELEVATOR_INSERT_SORT:
@ -699,7 +684,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
if (unplug_it && blk_queue_plugged(q)) {
int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
- q->in_flight;
- queue_in_flight(q);
if (nrq >= q->unplug_thresh)
__generic_unplug_device(q);
@ -755,117 +740,6 @@ void elv_add_request(struct request_queue *q, struct request *rq, int where,
}
EXPORT_SYMBOL(elv_add_request);
static inline struct request *__elv_next_request(struct request_queue *q)
{
struct request *rq;
while (1) {
while (!list_empty(&q->queue_head)) {
rq = list_entry_rq(q->queue_head.next);
if (blk_do_ordered(q, &rq))
return rq;
}
if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
return NULL;
}
}
struct request *elv_next_request(struct request_queue *q)
{
struct request *rq;
int ret;
while ((rq = __elv_next_request(q)) != NULL) {
if (!(rq->cmd_flags & REQ_STARTED)) {
/*
* This is the first time the device driver
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
if (blk_sorted_rq(rq))
elv_activate_rq(q, rq);
/*
* just mark as started even if we don't start
* it, a request that has been delayed should
* not be passed by new incoming requests
*/
rq->cmd_flags |= REQ_STARTED;
trace_block_rq_issue(q, rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = NULL;
}
if (rq->cmd_flags & REQ_DONTPREP)
break;
if (q->dma_drain_size && rq->data_len) {
/*
* make sure space for the drain appears we
* know we can do this because max_hw_segments
* has been adjusted to be one fewer than the
* device can handle
*/
rq->nr_phys_segments++;
}
if (!q->prep_rq_fn)
break;
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_OK) {
break;
} else if (ret == BLKPREP_DEFER) {
/*
* the request may have been (partially) prepped.
* we need to keep this request in the front to
* avoid resource deadlock. REQ_STARTED will
* prevent other fs requests from passing this one.
*/
if (q->dma_drain_size && rq->data_len &&
!(rq->cmd_flags & REQ_DONTPREP)) {
/*
* remove the space for the drain we added
* so that we don't add it again
*/
--rq->nr_phys_segments;
}
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
rq->cmd_flags |= REQ_QUIET;
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
}
}
return rq;
}
EXPORT_SYMBOL(elv_next_request);
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
list_del_init(&rq->queuelist);
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
* the driver side.
*/
if (blk_account_rq(rq))
q->in_flight++;
}
int elv_queue_empty(struct request_queue *q)
{
struct elevator_queue *e = q->elevator;
@ -935,7 +809,12 @@ void elv_abort_queue(struct request_queue *q)
rq = list_entry_rq(q->queue_head.next);
rq->cmd_flags |= REQ_QUIET;
trace_block_rq_abort(q, rq);
__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
*/
blk_start_request(rq);
__blk_end_request_all(rq, -EIO);
}
}
EXPORT_SYMBOL(elv_abort_queue);
@ -948,7 +827,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
* request is released from the driver, io must be done
*/
if (blk_account_rq(rq)) {
q->in_flight--;
q->in_flight[rq_is_sync(rq)]--;
if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
e->ops->elevator_completed_req_fn(q, rq);
}
@ -963,11 +842,11 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
if (!list_empty(&q->queue_head))
next = list_entry_rq(q->queue_head.next);
if (!q->in_flight &&
if (!queue_in_flight(q) &&
blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
(!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
blk_start_queueing(q);
__blk_run_queue(q);
}
}
}
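elv_completed_request() and elv_requeue_request() now maintain one in-flight counter per rq_is_sync() bucket instead of a single q->in_flight. Below is a hedged sketch of the kind of summing helper the queue_in_flight() calls above imply; total_in_flight is an illustrative name and this is not necessarily the kernel's own definition:

```c
#include <linux/blkdev.h>

/* Sum both buckets of the split counter: the index is the value
 * rq_is_sync() evaluates to for a given request (0 = async, 1 = sync). */
static inline unsigned int total_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}
```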
@ -1175,6 +1054,9 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
char elevator_name[ELV_NAME_MAX];
struct elevator_type *e;
if (!q->elevator)
return count;
strlcpy(elevator_name, name, sizeof(elevator_name));
strstrip(elevator_name);
@ -1198,10 +1080,15 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
struct elevator_queue *e = q->elevator;
struct elevator_type *elv = e->elevator_type;
struct elevator_type *elv;
struct elevator_type *__e;
int len = 0;
if (!q->elevator)
return sprintf(name, "none\n");
elv = e->elevator_type;
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
if (!strcmp(elv->elevator_name, __e->elevator_name))

View File

@ -852,11 +852,21 @@ static ssize_t disk_capability_show(struct device *dev,
return sprintf(buf, "%x\n", disk->flags);
}
static ssize_t disk_alignment_offset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue));
}
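disk_alignment_offset_show() exports the new value as a decimal integer in the disk's sysfs directory (see dev_attr_alignment_offset below). A tiny userspace sketch that reads it back; the device name "sda" is only an example:

```c
#include <stdio.h>

int main(void)
{
	long off;
	FILE *f = fopen("/sys/block/sda/alignment_offset", "r");

	if (!f) {
		perror("alignment_offset");
		return 1;
	}
	if (fscanf(f, "%ld", &off) == 1)
		printf("device starts %ld bytes past its natural alignment\n", off);
	fclose(f);
	return 0;
}
```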
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL);
static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
@ -875,6 +885,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_removable.attr,
&dev_attr_ro.attr,
&dev_attr_size.attr,
&dev_attr_alignment_offset.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST

View File

@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
bio->bi_private = &wait;
bio->bi_sector = start;
if (len > q->max_hw_sectors) {
bio->bi_size = q->max_hw_sectors << 9;
len -= q->max_hw_sectors;
start += q->max_hw_sectors;
if (len > queue_max_hw_sectors(q)) {
bio->bi_size = queue_max_hw_sectors(q) << 9;
len -= queue_max_hw_sectors(q);
start += queue_max_hw_sectors(q);
} else {
bio->bi_size = len << 9;
len = 0;
@ -311,9 +311,9 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
return put_int(arg, block_size(bdev));
case BLKSSZGET: /* get block device hardware sector size */
return put_int(arg, bdev_hardsect_size(bdev));
return put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET:
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))

View File

@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
return put_user(val, p);
}
@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
if (size < 0)
return -EINVAL;
if (size > (q->max_sectors << 9))
size = q->max_sectors << 9;
if (size > (queue_max_sectors(q) << 9))
size = queue_max_sectors(q) << 9;
q->sg_reserved_size = size;
return 0;
@ -230,7 +230,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
hdr->resid = rq->data_len;
hdr->resid = rq->resid_len;
hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) {
@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
if (hdr->dxfer_len > (q->max_hw_sectors << 9))
if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;
if (hdr->dxfer_len)
@ -500,9 +500,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL;
rq->data_len = 0;
rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
rq->cmd[0] = cmd;
rq->cmd[4] = data;

View File

@ -1084,7 +1084,7 @@ static int atapi_drain_needed(struct request *rq)
if (likely(!blk_pc_request(rq)))
return 0;
if (!rq->data_len || (rq->cmd_flags & REQ_RW))
if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
return 0;
return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
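The ioctl and SG_IO hunks above stop poking q->max_sectors, q->max_hw_sectors and rq->data_len directly and go through accessors instead. A compact sketch of a limit check written that way; rq_fits_queue() itself is invented for illustration:

```c
#include <linux/blkdev.h>

/* Would this request fit the queue's advertised limits? (illustrative) */
static bool rq_fits_queue(struct request_queue *q, struct request *rq)
{
	/* blk_rq_bytes() replaces reads of rq->data_len */
	if (blk_rq_bytes(rq) > (queue_max_hw_sectors(q) << 9))
		return false;				   /* hard limit, bytes */

	return blk_rq_sectors(rq) <= queue_max_sectors(q); /* soft limit, sectors */
}
```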

View File

@ -3321,7 +3321,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
DAC960_Command_T *Command;
while(1) {
Request = elv_next_request(req_q);
Request = blk_peek_request(req_q);
if (!Request)
return 1;
@ -3338,10 +3338,10 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
}
Command->Completion = Request->end_io_data;
Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
Command->BlockNumber = Request->sector;
Command->BlockCount = Request->nr_sectors;
Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = blk_rq_sectors(Request);
Command->Request = Request;
blkdev_dequeue_request(Request);
blk_start_request(Request);
Command->SegmentCount = blk_rq_map_sg(req_q,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
@ -3431,7 +3431,7 @@ static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
* successfully as possible.
*/
Command->SegmentCount = 1;
Command->BlockNumber = Request->sector;
Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = 1;
DAC960_QueueReadWriteCommand(Command);
return;

View File

@ -412,7 +412,7 @@ config ATA_OVER_ETH
config MG_DISK
tristate "mGine mflash, gflash support"
depends on ARM && ATA && GPIOLIB
depends on ARM && GPIOLIB
help
mGine mFlash(gFlash) block device driver

View File

@ -112,8 +112,6 @@ module_param(fd_def_df0, ulong, 0);
MODULE_LICENSE("GPL");
static struct request_queue *floppy_queue;
#define QUEUE (floppy_queue)
#define CURRENT elv_next_request(floppy_queue)
/*
* Macros
@ -1335,64 +1333,60 @@ static int get_track(int drive, int track)
static void redo_fd_request(void)
{
struct request *rq;
unsigned int cnt, block, track, sector;
int drive;
struct amiga_floppy_struct *floppy;
char *data;
unsigned long flags;
int err;
repeat:
if (!CURRENT) {
next_req:
rq = blk_fetch_request(floppy_queue);
if (!rq) {
/* Nothing left to do */
return;
}
floppy = CURRENT->rq_disk->private_data;
floppy = rq->rq_disk->private_data;
drive = floppy - unit;
next_segment:
/* Here someone could investigate to be more efficient */
for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
CURRENT->sector,cnt,
(rq_data_dir(CURRENT) == READ) ? "read" : "write");
blk_rq_pos(rq), cnt,
(rq_data_dir(rq) == READ) ? "read" : "write");
#endif
block = CURRENT->sector + cnt;
block = blk_rq_pos(rq) + cnt;
if ((int)block > floppy->blocks) {
end_request(CURRENT, 0);
goto repeat;
err = -EIO;
break;
}
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
data = CURRENT->buffer + 512 * cnt;
data = rq->buffer + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
#endif
if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
printk(KERN_WARNING "do_fd_request: unknown command\n");
end_request(CURRENT, 0);
goto repeat;
}
if (get_track(drive, track) == -1) {
end_request(CURRENT, 0);
goto repeat;
err = -EIO;
break;
}
switch (rq_data_dir(CURRENT)) {
case READ:
if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
break;
case WRITE:
} else {
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
if (!fd_motor_on(drive)) {
end_request(CURRENT, 0);
goto repeat;
err = -EIO;
break;
}
/*
* setup a callback to write the track buffer
@ -1404,14 +1398,12 @@ static void redo_fd_request(void)
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
local_irq_restore(flags);
break;
}
}
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
end_request(CURRENT, 1);
goto repeat;
if (__blk_end_request_cur(rq, err))
goto next_segment;
goto next_req;
}
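The rewrite above trades the end_request()/goto repeat pattern for __blk_end_request_cur(), which completes just the current chunk and reports whether anything is left of the request. A stripped-down sketch of that loop shape; transfer_chunk() is a made-up stand-in and, as in any request_fn, the queue lock is assumed to be held:

```c
#include <linux/blkdev.h>

/* Hypothetical per-chunk transfer; returns 0 on success or -EIO. */
static int transfer_chunk(void *buf, unsigned int bytes);

static void service_requests(struct request_queue *q)
{
	struct request *rq = blk_fetch_request(q);

	while (rq) {
		int err = transfer_chunk(rq->buffer, blk_rq_cur_bytes(rq));

		/* false means the whole request is done (and freed) */
		if (!__blk_end_request_cur(rq, err))
			rq = blk_fetch_request(q);
	}
}
```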
static void do_fd_request(struct request_queue * q)

View File

@ -79,9 +79,7 @@
#undef DEBUG
static struct request_queue *floppy_queue;
#define QUEUE (floppy_queue)
#define CURRENT elv_next_request(floppy_queue)
static struct request *fd_request;
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@ -376,6 +374,12 @@ static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0);
static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0);
static DEFINE_TIMER(fd_timer, check_change, 0, 0);
static void fd_end_request_cur(int err)
{
if (!__blk_end_request_cur(fd_request, err))
fd_request = NULL;
}
static inline void start_motor_off_timer(void)
{
mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
@ -606,15 +610,15 @@ static void fd_error( void )
return;
}
if (!CURRENT)
if (!fd_request)
return;
CURRENT->errors++;
if (CURRENT->errors >= MAX_ERRORS) {
fd_request->errors++;
if (fd_request->errors >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
end_request(CURRENT, 0);
fd_end_request_cur(-EIO);
}
else if (CURRENT->errors == RECALIBRATE_ERRORS) {
else if (fd_request->errors == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@ -725,16 +729,14 @@ static void do_fd_action( int drive )
if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
if (ReqCmd == READ) {
copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
if (++ReqCnt < CURRENT->current_nr_sectors) {
if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( drive );
goto repeat;
}
else {
/* all sectors finished */
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
end_request(CURRENT, 1);
fd_end_request_cur(0);
redo_fd_request();
return;
}
@ -1132,16 +1134,14 @@ static void fd_rwsec_done1(int status)
}
}
if (++ReqCnt < CURRENT->current_nr_sectors) {
if (++ReqCnt < blk_rq_cur_sectors(fd_request)) {
/* read next sector */
setup_req_params( SelectedDrive );
do_fd_action( SelectedDrive );
}
else {
/* all sectors finished */
CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
CURRENT->sector += CURRENT->current_nr_sectors;
end_request(CURRENT, 1);
fd_end_request_cur(0);
redo_fd_request();
}
return;
@ -1382,7 +1382,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
read_track = (ReqCmd == READ && CURRENT->errors == 0);
read_track = (ReqCmd == READ && fd_request->errors == 0);
else
read_track = 0;
@ -1396,25 +1396,27 @@ static void redo_fd_request(void)
int drive, type;
struct atari_floppy_struct *floppy;
DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
CURRENT ? CURRENT->sector : 0 ));
DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
fd_request ? blk_rq_pos(fd_request) : 0 ));
IsFormatting = 0;
repeat:
if (!fd_request) {
fd_request = blk_fetch_request(floppy_queue);
if (!fd_request)
goto the_end;
}
if (!CURRENT)
goto the_end;
floppy = CURRENT->rq_disk->private_data;
floppy = fd_request->rq_disk->private_data;
drive = floppy - unit;
type = floppy->type;
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
end_request(CURRENT, 0);
fd_end_request_cur(-EIO);
goto repeat;
}
@ -1430,12 +1432,12 @@ repeat:
/* user supplied disk type */
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
end_request(CURRENT, 0);
fd_end_request_cur(-EIO);
goto repeat;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
end_request(CURRENT, 0);
fd_end_request_cur(-EIO);
goto repeat;
}
type = minor2disktype[type].index;
@ -1444,8 +1446,8 @@ repeat:
UD.autoprobe = 0;
}
if (CURRENT->sector + 1 > UDT->blocks) {
end_request(CURRENT, 0);
if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
fd_end_request_cur(-EIO);
goto repeat;
}
@ -1453,9 +1455,9 @@ repeat:
del_timer( &motor_off_timer );
ReqCnt = 0;
ReqCmd = rq_data_dir(CURRENT);
ReqBlock = CURRENT->sector;
ReqBuffer = CURRENT->buffer;
ReqCmd = rq_data_dir(fd_request);
ReqBlock = blk_rq_pos(fd_request);
ReqBuffer = fd_request->buffer;
setup_req_params( drive );
do_fd_action( drive );

View File

@ -407,12 +407,7 @@ static int __init ramdisk_size(char *str)
rd_size = simple_strtol(str, NULL, 0);
return 1;
}
static int __init ramdisk_size2(char *str)
{
return ramdisk_size(str);
}
__setup("ramdisk=", ramdisk_size);
__setup("ramdisk_size=", ramdisk_size2);
__setup("ramdisk_size=", ramdisk_size);
#endif
/*

File diff suppressed because it is too large


View File

@ -11,6 +11,11 @@
#define IO_OK 0
#define IO_ERROR 1
#define IO_NEEDS_RETRY 3
#define VENDOR_LEN 8
#define MODEL_LEN 16
#define REV_LEN 4
struct ctlr_info;
typedef struct ctlr_info ctlr_info_t;
@ -34,23 +39,20 @@ typedef struct _drive_info_struct
int cylinders;
int raid_level; /* set to -1 to indicate that
* the drive is not in use/configured
*/
int busy_configuring; /*This is set when the drive is being removed
*to prevent it from being opened or it's queue
*from being started.
*/
__u8 serial_no[16]; /* from inquiry page 0x83, */
/* not necc. null terminated. */
*/
int busy_configuring; /* This is set when a drive is being removed
* to prevent it from being opened or it's
* queue from being started.
*/
struct device dev;
__u8 serial_no[16]; /* from inquiry page 0x83,
* not necc. null terminated.
*/
char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
char model[MODEL_LEN + 1]; /* SCSI model string */
char rev[REV_LEN + 1]; /* SCSI revision string */
} drive_info_struct;
#ifdef CONFIG_CISS_SCSI_TAPE
struct sendcmd_reject_list {
int ncompletions;
unsigned long *complete; /* array of NR_CMDS tags */
};
#endif
struct ctlr_info
{
int ctlr;
@ -118,11 +120,11 @@ struct ctlr_info
void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
/* list of block side commands the scsi error handling sucked up */
/* and saved for later processing */
struct sendcmd_reject_list scsi_rejects;
#endif
unsigned char alive;
struct completion *rescan_wait;
struct task_struct *cciss_scan_thread;
struct device dev;
};
/* Defining the diffent access_menthods */

View File

@ -217,6 +217,8 @@ typedef union _LUNAddr_struct {
LogDevAddr_struct LogDev;
} LUNAddr_struct;
#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
typedef struct _CommandListHeader_struct {
BYTE ReplyQueue;
BYTE SGList;

View File

@ -44,20 +44,13 @@
#define CCISS_ABORT_MSG 0x00
#define CCISS_RESET_MSG 0x01
/* some prototypes... */
static int sendcmd(
__u8 cmd,
int ctlr,
void *buff,
size_t size,
unsigned int use_unit_num, /* 0: address the controller,
1: address logical volume log_unit,
2: address is in scsi3addr */
unsigned int log_unit,
__u8 page_code,
unsigned char *scsi3addr,
static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
size_t size,
__u8 page_code, unsigned char *scsi3addr,
int cmd_type);
static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool);
static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool);
static int cciss_scsi_proc_info(
struct Scsi_Host *sh,
@ -1575,6 +1568,75 @@ cciss_seq_tape_report(struct seq_file *seq, int ctlr)
CPQ_TAPE_UNLOCK(ctlr, flags);
}
static int wait_for_device_to_become_ready(ctlr_info_t *h,
unsigned char lunaddr[])
{
int rc;
int count = 0;
int waittime = HZ;
CommandList_struct *c;
c = cmd_alloc(h, 1);
if (!c) {
printk(KERN_WARNING "cciss%d: out of memory in "
"wait_for_device_to_become_ready.\n", h->ctlr);
return IO_ERROR;
}
/* Send test unit ready until device ready, or give up. */
while (count < 20) {
/* Wait for a bit. do this first, because if we send
* the TUR right away, the reset will just abort it.
*/
schedule_timeout_uninterruptible(waittime);
count++;
/* Increase wait time with each try, up to a point. */
if (waittime < (HZ * 30))
waittime = waittime * 2;
/* Send the Test Unit Ready */
rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0,
lunaddr, TYPE_CMD);
if (rc == 0)
rc = sendcmd_withirq_core(h, c, 0);
(void) process_sendcmd_error(h, c);
if (rc != 0)
goto retry_tur;
if (c->err_info->CommandStatus == CMD_SUCCESS)
break;
if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
if (c->err_info->SenseInfo[2] == NO_SENSE)
break;
if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
unsigned char asc;
asc = c->err_info->SenseInfo[12];
check_for_unit_attention(h, c);
if (asc == POWER_OR_RESET)
break;
}
}
retry_tur:
printk(KERN_WARNING "cciss%d: Waiting %d secs "
"for device to become ready.\n",
h->ctlr, waittime / HZ);
rc = 1; /* device not ready. */
}
if (rc)
printk("cciss%d: giving up on device.\n", h->ctlr);
else
printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr);
cmd_free(h, c, 1);
return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
@ -1591,6 +1653,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_in_trouble;
unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@ -1600,19 +1663,15 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
return FAILED;
ctlr = (*c)->ctlr;
printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr);
/* find the command that's giving us trouble */
cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_in_trouble == NULL) { /* paranoia */
if (cmd_in_trouble == NULL) /* paranoia */
return FAILED;
}
memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
/* send a reset to the SCSI LUN which the command was sent to */
rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0,
(unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0],
rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr,
TYPE_MSG);
/* sendcmd turned off interrupts on the board, turn 'em back on. */
(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
if (rc == 0)
if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0)
return SUCCESS;
printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr);
return FAILED;
@ -1622,6 +1681,7 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
{
int rc;
CommandList_struct *cmd_to_abort;
unsigned char lunaddr[8];
ctlr_info_t **c;
int ctlr;
@ -1636,12 +1696,9 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
if (cmd_to_abort == NULL) /* paranoia */
return FAILED;
rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
0, 2, 0, 0,
(unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0],
TYPE_MSG);
/* sendcmd turned off interrupts on the board, turn 'em back on. */
(*c)->access.set_intr_mask(*c, CCISS_INTR_ON);
memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag,
0, 0, lunaddr, TYPE_MSG);
if (rc == 0)
return SUCCESS;
return FAILED;

View File

@ -474,7 +474,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
disk->fops = &ida_fops;
if (j && !drv->nr_blks)
continue;
blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = hba[i]->queue;
disk->private_data = drv;
@ -903,7 +903,7 @@ static void do_ida_request(struct request_queue *q)
goto startio;
queue_next:
creq = elv_next_request(q);
creq = blk_peek_request(q);
if (!creq)
goto startio;
@ -912,17 +912,18 @@ queue_next:
if ((c = cmd_alloc(h,1)) == NULL)
goto startio;
blkdev_dequeue_request(creq);
blk_start_request(creq);
c->ctlr = h->ctlr;
c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
c->hdr.size = sizeof(rblk_t) >> 2;
c->size += sizeof(rblk_t);
c->req.hdr.blk = creq->sector;
c->req.hdr.blk = blk_rq_pos(creq);
c->rq = creq;
DBGPX(
printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
printk("sector=%d, nr_sectors=%u\n",
blk_rq_pos(creq), blk_rq_sectors(creq));
);
sg_init_table(tmp_sg, SG_MAX);
seg = blk_rq_map_sg(q, creq, tmp_sg);
@ -940,9 +941,9 @@ DBGPX(
tmp_sg[i].offset,
tmp_sg[i].length, dir);
}
DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
c->req.hdr.sg_cnt = seg;
c->req.hdr.blk_cnt = creq->nr_sectors;
c->req.hdr.blk_cnt = blk_rq_sectors(creq);
c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
c->type = CMD_RWREQ;
@ -1024,8 +1025,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
cmd->req.sg[i].size, ddir);
DBGPX(printk("Done with %p\n", rq););
if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
BUG();
__blk_end_request_all(rq, error);
}
/*
@ -1546,7 +1546,7 @@ static int revalidate_allvol(ctlr_info_t *host)
drv_info_t *drv = &host->drv[i];
if (i && !drv->nr_blks)
continue;
blk_queue_hardsect_size(host->queue, drv->blk_size);
blk_queue_logical_block_size(host->queue, drv->blk_size);
set_capacity(disk, drv->nr_blks);
disk->queue = host->queue;
disk->private_data = drv;

View File

@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
del_timer(&fd_timeout);
cont = NULL;
clear_bit(0, &fdc_busy);
if (elv_next_request(floppy_queue))
if (current_req || blk_peek_request(floppy_queue))
do_fd_request(floppy_queue);
spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
@ -2303,7 +2303,7 @@ static void floppy_end_request(struct request *req, int error)
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = req->current_nr_sectors;
nr_sectors = blk_rq_cur_sectors(req);
if (__blk_end_request(req, error, nr_sectors << 9))
return;
@ -2332,7 +2332,7 @@ static void request_done(int uptodate)
if (uptodate) {
/* maintain values for invalidation on geometry
* change */
block = current_count_sectors + req->sector;
block = current_count_sectors + blk_rq_pos(req);
INFBOUND(DRS->maxblock, block);
if (block > _floppy->sect)
DRS->maxtrack = 1;
@ -2346,10 +2346,10 @@ static void request_done(int uptodate)
/* record write error information */
DRWE->write_errors++;
if (DRWE->write_errors == 1) {
DRWE->first_error_sector = req->sector;
DRWE->first_error_sector = blk_rq_pos(req);
DRWE->first_error_generation = DRS->generation;
}
DRWE->last_error_sector = req->sector;
DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
spin_lock_irqsave(q->queue_lock, flags);
@ -2503,24 +2503,23 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
max_sector = transfer_size(ssize,
min(max_sector, max_sector_2),
current_req->nr_sectors);
blk_rq_sectors(current_req));
if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
buffer_max > fsector_t + current_req->nr_sectors)
buffer_max > fsector_t + blk_rq_sectors(current_req))
current_count_sectors = min_t(int, buffer_max - fsector_t,
current_req->nr_sectors);
blk_rq_sectors(current_req));
remaining = current_count_sectors << 9;
#ifdef FLOPPY_SANITY_CHECK
if ((remaining >> 9) > current_req->nr_sectors &&
CT(COMMAND) == FD_WRITE) {
if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) {
DPRINT("in copy buffer\n");
printk("current_count_sectors=%ld\n", current_count_sectors);
printk("remaining=%d\n", remaining >> 9);
printk("current_req->nr_sectors=%ld\n",
current_req->nr_sectors);
printk("current_req->nr_sectors=%u\n",
blk_rq_sectors(current_req));
printk("current_req->current_nr_sectors=%u\n",
current_req->current_nr_sectors);
blk_rq_cur_sectors(current_req));
printk("max_sector=%d\n", max_sector);
printk("ssize=%d\n", ssize);
}
@ -2530,7 +2529,7 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
size = current_req->current_nr_sectors << 9;
size = blk_rq_cur_bytes(current_req);
rq_for_each_segment(bv, current_req, iter) {
if (!remaining)
@ -2648,10 +2647,10 @@ static int make_raw_rw_request(void)
max_sector = _floppy->sect * _floppy->head;
TRACK = (int)current_req->sector / max_sector;
fsector_t = (int)current_req->sector % max_sector;
TRACK = (int)blk_rq_pos(current_req) / max_sector;
fsector_t = (int)blk_rq_pos(current_req) % max_sector;
if (_floppy->track && TRACK >= _floppy->track) {
if (current_req->current_nr_sectors & 1) {
if (blk_rq_cur_sectors(current_req) & 1) {
current_count_sectors = 1;
return 1;
} else
@ -2669,7 +2668,7 @@ static int make_raw_rw_request(void)
if (fsector_t >= max_sector) {
current_count_sectors =
min_t(int, _floppy->sect - fsector_t,
current_req->nr_sectors);
blk_rq_sectors(current_req));
return 1;
}
SIZECODE = 2;
@ -2720,7 +2719,7 @@ static int make_raw_rw_request(void)
in_sector_offset = (fsector_t % _floppy->sect) % ssize;
aligned_sector_t = fsector_t - in_sector_offset;
max_size = current_req->nr_sectors;
max_size = blk_rq_sectors(current_req);
if ((raw_cmd->track == buffer_track) &&
(current_drive == buffer_drive) &&
(fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
@ -2729,10 +2728,10 @@ static int make_raw_rw_request(void)
copy_buffer(1, max_sector, buffer_max);
return 1;
}
} else if (in_sector_offset || current_req->nr_sectors < ssize) {
} else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
if (CT(COMMAND) == FD_WRITE) {
if (fsector_t + current_req->nr_sectors > ssize &&
fsector_t + current_req->nr_sectors < ssize + ssize)
if (fsector_t + blk_rq_sectors(current_req) > ssize &&
fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
max_size = ssize + ssize;
else
max_size = ssize;
@ -2776,7 +2775,7 @@ static int make_raw_rw_request(void)
(indirect * 2 > direct * 3 &&
*errors < DP->max_errors.read_track && ((!probing
|| (DP->read_track & (1 << DRS->probed_format)))))) {
max_size = current_req->nr_sectors;
max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = current_req->buffer;
raw_cmd->length = current_count_sectors << 9;
@ -2801,7 +2800,7 @@ static int make_raw_rw_request(void)
fsector_t > buffer_max ||
fsector_t < buffer_min ||
((CT(COMMAND) == FD_READ ||
(!in_sector_offset && current_req->nr_sectors >= ssize)) &&
(!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
max_sector > 2 * max_buffer_sectors + buffer_min &&
max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
/* not enough space */
@ -2879,8 +2878,8 @@ static int make_raw_rw_request(void)
printk("write\n");
return 0;
}
} else if (raw_cmd->length > current_req->nr_sectors << 9 ||
current_count_sectors > current_req->nr_sectors) {
} else if (raw_cmd->length > blk_rq_bytes(current_req) ||
current_count_sectors > blk_rq_sectors(current_req)) {
DPRINT("buffer overrun in direct transfer\n");
return 0;
} else if (raw_cmd->length < current_count_sectors << 9) {
@ -2913,7 +2912,7 @@ static void redo_fd_request(void)
struct request *req;
spin_lock_irq(floppy_queue->queue_lock);
req = elv_next_request(floppy_queue);
req = blk_fetch_request(floppy_queue);
spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
@ -2990,8 +2989,9 @@ static void do_fd_request(struct request_queue * q)
if (usage_count == 0) {
printk("warning: usage count=0, current_req=%p exiting\n",
current_req);
printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
current_req->cmd_type, current_req->cmd_flags);
printk("sect=%ld type=%x flags=%x\n",
(long)blk_rq_pos(current_req), current_req->cmd_type,
current_req->cmd_flags);
return;
}
if (test_bit(0, &fdc_busy)) {
@ -4148,6 +4148,24 @@ static void floppy_device_release(struct device *dev)
{
}
static int floppy_resume(struct platform_device *dev)
{
int fdc;
for (fdc = 0; fdc < N_FDC; fdc++)
if (FDCS->address != -1)
user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
return 0;
}
static struct platform_driver floppy_driver = {
.resume = floppy_resume,
.driver = {
.name = "floppy",
},
};
static struct platform_device floppy_device[N_DRIVE];
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@ -4196,10 +4214,14 @@ static int __init floppy_init(void)
if (err)
goto out_put_disk;
err = platform_driver_register(&floppy_driver);
if (err)
goto out_unreg_blkdev;
floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!floppy_queue) {
err = -ENOMEM;
goto out_unreg_blkdev;
goto out_unreg_driver;
}
blk_queue_max_sectors(floppy_queue, 64);
@ -4346,6 +4368,8 @@ out_flush_work:
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
blk_cleanup_queue(floppy_queue);
out_unreg_driver:
platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@ -4566,6 +4590,7 @@ static void __exit floppy_module_exit(void)
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);

View File

@ -98,10 +98,9 @@
static DEFINE_SPINLOCK(hd_lock);
static struct request_queue *hd_queue;
static struct request *hd_req;
#define MAJOR_NR HD_MAJOR
#define QUEUE (hd_queue)
#define CURRENT elv_next_request(hd_queue)
#define TIMEOUT_VALUE (6*HZ)
#define HD_DELAY 0
@ -195,11 +194,24 @@ static void __init hd_setup(char *str, int *ints)
NR_HD = hdind+1;
}
static bool hd_end_request(int err, unsigned int bytes)
{
if (__blk_end_request(hd_req, err, bytes))
return true;
hd_req = NULL;
return false;
}
static bool hd_end_request_cur(int err)
{
return hd_end_request(err, blk_rq_cur_bytes(hd_req));
}
static void dump_status(const char *msg, unsigned int stat)
{
char *name = "hd?";
if (CURRENT)
name = CURRENT->rq_disk->disk_name;
if (hd_req)
name = hd_req->rq_disk->disk_name;
#ifdef VERBOSE_ERRORS
printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
@ -227,8 +239,8 @@ static void dump_status(const char *msg, unsigned int stat)
if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) {
printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
if (CURRENT)
printk(", sector=%ld", CURRENT->sector);
if (hd_req)
printk(", sector=%ld", blk_rq_pos(hd_req));
}
printk("\n");
}
@ -406,11 +418,12 @@ static void unexpected_hd_interrupt(void)
*/
static void bad_rw_intr(void)
{
struct request *req = CURRENT;
struct request *req = hd_req;
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
end_request(req, 0);
hd_end_request_cur(-EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
@ -452,37 +465,30 @@ static void read_intr(void)
bad_rw_intr();
hd_request();
return;
ok_to_read:
req = CURRENT;
req = hd_req;
insw(HD_DATA, req->buffer, 256);
req->sector++;
req->buffer += 512;
req->errors = 0;
i = --req->nr_sectors;
--req->current_nr_sectors;
#ifdef DEBUG
printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
req->rq_disk->disk_name, req->sector, req->nr_sectors,
req->buffer+512);
printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
req->rq_disk->disk_name, blk_rq_pos(req) + 1,
blk_rq_sectors(req) - 1, req->buffer+512);
#endif
if (req->current_nr_sectors <= 0)
end_request(req, 1);
if (i > 0) {
if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
return;
}
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
if (elv_next_request(QUEUE))
hd_request();
return;
hd_request();
}
static void write_intr(void)
{
struct request *req = CURRENT;
struct request *req = hd_req;
int i;
int retries = 100000;
@ -492,30 +498,25 @@ static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
ok_to_write:
req->sector++;
i = --req->nr_sectors;
--req->current_nr_sectors;
req->buffer += 512;
if (!i || (req->bio && req->current_nr_sectors <= 0))
end_request(req, 1);
if (i > 0) {
if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
} else {
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
hd_request();
return;
}
return;
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
hd_request();
}
static void recal_intr(void)
@ -537,18 +538,18 @@ static void hd_times_out(unsigned long dummy)
do_hd = NULL;
if (!CURRENT)
if (!hd_req)
return;
spin_lock_irq(hd_queue->queue_lock);
reset = 1;
name = CURRENT->rq_disk->disk_name;
name = hd_req->rq_disk->disk_name;
printk("%s: timeout\n", name);
if (++CURRENT->errors >= MAX_ERRORS) {
if (++hd_req->errors >= MAX_ERRORS) {
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
end_request(CURRENT, 0);
hd_end_request_cur(-EIO);
}
hd_request();
spin_unlock_irq(hd_queue->queue_lock);
@ -563,7 +564,7 @@ static int do_special_op(struct hd_i_struct *disk, struct request *req)
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
end_request(req, 0);
hd_end_request_cur(-EIO);
}
disk->special_op = 0;
return 1;
@ -590,24 +591,27 @@ static void hd_request(void)
repeat:
del_timer(&device_timer);
req = CURRENT;
if (!req) {
do_hd = NULL;
return;
if (!hd_req) {
hd_req = blk_fetch_request(hd_queue);
if (!hd_req) {
do_hd = NULL;
return;
}
}
req = hd_req;
if (reset) {
reset_hd();
return;
}
disk = req->rq_disk->private_data;
block = req->sector;
nsect = req->nr_sectors;
block = blk_rq_pos(req);
nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
end_request(req, 0);
hd_end_request_cur(-EIO);
goto repeat;
}
@ -647,7 +651,7 @@ repeat:
break;
default:
printk("unknown hd-command\n");
end_request(req, 0);
hd_end_request_cur(-EIO);
break;
}
}
@ -720,7 +724,7 @@ static int __init hd_init(void)
blk_queue_max_sectors(hd_queue, 255);
init_timer(&device_timer);
device_timer.function = hd_times_out;
blk_queue_hardsect_size(hd_queue, 512);
blk_queue_logical_block_size(hd_queue, 512);
if (!NR_HD) {
/*

View File

@ -511,11 +511,7 @@ out:
*/
static void loop_add_bio(struct loop_device *lo, struct bio *bio)
{
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
} else
lo->lo_bio = lo->lo_biotail = bio;
bio_list_add(&lo->lo_bio_list, bio);
}
/*
@ -523,16 +519,7 @@ static void loop_add_bio(struct loop_device *lo, struct bio *bio)
*/
static struct bio *loop_get_bio(struct loop_device *lo)
{
struct bio *bio;
if ((bio = lo->lo_bio)) {
if (bio == lo->lo_biotail)
lo->lo_biotail = NULL;
lo->lo_bio = bio->bi_next;
bio->bi_next = NULL;
}
return bio;
return bio_list_pop(&lo->lo_bio_list);
}
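The open-coded lo_bio/lo_biotail singly linked chain becomes a struct bio_list managed with the generic helpers from <linux/bio.h>. A small sketch of the same producer/consumer shape; the list and function names are illustrative, and a real user would serialise access with a lock as loop does with lo_lock:

```c
#include <linux/bio.h>

static struct bio_list pending_bios;		/* illustrative */

static void pending_init(void)
{
	bio_list_init(&pending_bios);
}

static void pending_push(struct bio *bio)
{
	bio_list_add(&pending_bios, bio);	/* append at the tail */
}

static struct bio *pending_pop(void)
{
	return bio_list_pop(&pending_bios);	/* NULL when empty */
}
```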
static int loop_make_request(struct request_queue *q, struct bio *old_bio)
@ -609,12 +596,13 @@ static int loop_thread(void *data)
set_user_nice(current, -20);
while (!kthread_should_stop() || lo->lo_bio) {
while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
wait_event_interruptible(lo->lo_event,
lo->lo_bio || kthread_should_stop());
!bio_list_empty(&lo->lo_bio_list) ||
kthread_should_stop());
if (!lo->lo_bio)
if (bio_list_empty(&lo->lo_bio_list))
continue;
spin_lock_irq(&lo->lo_lock);
bio = loop_get_bio(lo);
@ -721,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
goto out_putf;
/* new backing store needs to support loop (eg splice_read) */
if (!inode->i_fop->splice_read)
goto out_putf;
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@ -800,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
error = -EINVAL;
if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
const struct address_space_operations *aops = mapping->a_ops;
/*
* If we can't read - sorry. If we only can't write - well,
* it's going to be read-only.
*/
if (!file->f_op->splice_read)
goto out_putf;
if (aops->write_begin)
lo_flags |= LO_FLAGS_USE_AOPS;
if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
@ -841,7 +820,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
lo->lo_bio = lo->lo_biotail = NULL;
bio_list_init(&lo->lo_bio_list);
/*
* set queue make_request_fn, and add limits based on lower level

View File

@ -17,71 +17,220 @@
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/libata.h>
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mg_disk.h>
#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
/* name for block device */
#define MG_DISK_NAME "mgd"
/* name for platform device */
#define MG_DEV_NAME "mg_disk"
#define MG_DISK_MAJ 0
#define MG_DISK_MAX_PART 16
#define MG_SECTOR_SIZE 512
#define MG_MAX_SECTS 256
/* Register offsets */
#define MG_BUFF_OFFSET 0x8000
#define MG_STORAGE_BUFFER_SIZE 0x200
#define MG_REG_OFFSET 0xC000
#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
/* handy status */
#define MG_STAT_READY (ATA_DRDY | ATA_DSC)
#define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \
ATA_ERR))) == MG_STAT_READY)
/* error code for others */
#define MG_ERR_NONE 0
#define MG_ERR_TIMEOUT 0x100
#define MG_ERR_INIT_STAT 0x101
#define MG_ERR_TRANSLATION 0x102
#define MG_ERR_CTRL_RST 0x103
#define MG_ERR_INV_STAT 0x104
#define MG_ERR_RSTOUT 0x105
#define MG_MAX_ERRORS 6 /* Max read/write errors */
/* command */
#define MG_CMD_RD 0x20
#define MG_CMD_WR 0x30
#define MG_CMD_SLEEP 0x99
#define MG_CMD_WAKEUP 0xC3
#define MG_CMD_ID 0xEC
#define MG_CMD_WR_CONF 0x3C
#define MG_CMD_RD_CONF 0x40
/* operation mode */
#define MG_OP_CASCADE (1 << 0)
#define MG_OP_CASCADE_SYNC_RD (1 << 1)
#define MG_OP_CASCADE_SYNC_WR (1 << 2)
#define MG_OP_INTERLEAVE (1 << 3)
/* synchronous */
#define MG_BURST_LAT_4 (3 << 4)
#define MG_BURST_LAT_5 (4 << 4)
#define MG_BURST_LAT_6 (5 << 4)
#define MG_BURST_LAT_7 (6 << 4)
#define MG_BURST_LAT_8 (7 << 4)
#define MG_BURST_LEN_4 (1 << 1)
#define MG_BURST_LEN_8 (2 << 1)
#define MG_BURST_LEN_16 (3 << 1)
#define MG_BURST_LEN_32 (4 << 1)
#define MG_BURST_LEN_CONT (0 << 1)
/* timeout value (unit: ms) */
#define MG_TMAX_CONF_TO_CMD 1
#define MG_TMAX_WAIT_RD_DRQ 10
#define MG_TMAX_WAIT_WR_DRQ 500
#define MG_TMAX_RST_TO_BUSY 10
#define MG_TMAX_HDRST_TO_RDY 500
#define MG_TMAX_SWRST_TO_RDY 500
#define MG_TMAX_RSTOUT 3000
/* device attribution */
/* use mflash as boot device */
#define MG_BOOT_DEV (1 << 0)
/* use mflash as storage device */
#define MG_STORAGE_DEV (1 << 1)
/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
/* names of GPIO resource */
#define MG_RST_PIN "mg_rst"
/* except MG_BOOT_DEV, reset-out pin should be assigned */
#define MG_RSTOUT_PIN "mg_rstout"
/* private driver data */
struct mg_drv_data {
/* disk resource */
u32 use_polling;
/* device attribution */
u32 dev_attr;
/* internally used */
struct mg_host *host;
};
/* main structure for mflash driver */
struct mg_host {
struct device *dev;
struct request_queue *breq;
struct request *req;
spinlock_t lock;
struct gendisk *gd;
struct timer_list timer;
void (*mg_do_intr) (struct mg_host *);
u16 id[ATA_ID_WORDS];
u16 cyls;
u16 heads;
u16 sectors;
u32 n_sectors;
u32 nres_sectors;
void __iomem *dev_base;
unsigned int irq;
unsigned int rst;
unsigned int rstout;
u32 major;
u32 error;
};
/*
* Debugging macro and defines
*/
#undef DO_MG_DEBUG
#ifdef DO_MG_DEBUG
# define MG_DBG(fmt, args...) \
printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
#else /* CONFIG_MG_DEBUG */
# define MG_DBG(fmt, args...) do { } while (0)
#endif /* CONFIG_MG_DEBUG */
static void mg_request(struct request_queue *);
static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
{
if (__blk_end_request(host->req, err, nr_bytes))
return true;
host->req = NULL;
return false;
}
static bool mg_end_request_cur(struct mg_host *host, int err)
{
return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
}
static void mg_dump_status(const char *msg, unsigned int stat,
struct mg_host *host)
{
char *name = MG_DISK_NAME;
struct request *req;
if (host->breq) {
req = elv_next_request(host->breq);
if (req)
name = req->rq_disk->disk_name;
}
if (host->req)
name = host->req->rq_disk->disk_name;
printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
if (stat & MG_REG_STATUS_BIT_BUSY)
if (stat & ATA_BUSY)
printk("Busy ");
if (stat & MG_REG_STATUS_BIT_READY)
if (stat & ATA_DRDY)
printk("DriveReady ");
if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
if (stat & ATA_DF)
printk("WriteFault ");
if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
if (stat & ATA_DSC)
printk("SeekComplete ");
if (stat & MG_REG_STATUS_BIT_DATA_REQ)
if (stat & ATA_DRQ)
printk("DataRequest ");
if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
if (stat & ATA_CORR)
printk("CorrectedError ");
if (stat & MG_REG_STATUS_BIT_ERROR)
if (stat & ATA_ERR)
printk("Error ");
printk("}\n");
if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
if ((stat & ATA_ERR) == 0) {
host->error = 0;
} else {
host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
host->error & 0xff);
if (host->error & MG_REG_ERR_BBK)
if (host->error & ATA_BBK)
printk("BadSector ");
if (host->error & MG_REG_ERR_UNC)
if (host->error & ATA_UNC)
printk("UncorrectableError ");
if (host->error & MG_REG_ERR_IDNF)
if (host->error & ATA_IDNF)
printk("SectorIdNotFound ");
if (host->error & MG_REG_ERR_ABRT)
if (host->error & ATA_ABORTED)
printk("DriveStatusError ");
if (host->error & MG_REG_ERR_AMNF)
if (host->error & ATA_AMNF)
printk("AddrMarkNotFound ");
printk("}");
if (host->error &
(MG_REG_ERR_BBK | MG_REG_ERR_UNC |
MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
if (host->breq) {
req = elv_next_request(host->breq);
if (req)
printk(", sector=%u", (u32)req->sector);
}
if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
if (host->req)
printk(", sector=%u",
(unsigned int)blk_rq_pos(host->req));
}
printk("\n");
}
@ -100,12 +249,12 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
do {
cur_jiffies = jiffies;
if (status & MG_REG_STATUS_BIT_BUSY) {
if (expect == MG_REG_STATUS_BIT_BUSY)
if (status & ATA_BUSY) {
if (expect == ATA_BUSY)
break;
} else {
/* Check the error condition! */
if (status & MG_REG_STATUS_BIT_ERROR) {
if (status & ATA_ERR) {
mg_dump_status("mg_wait", status, host);
break;
}
@ -114,8 +263,8 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
if (MG_READY_OK(status))
break;
if (expect == MG_REG_STATUS_BIT_DATA_REQ)
if (status & MG_REG_STATUS_BIT_DATA_REQ)
if (expect == ATA_DRQ)
if (status & ATA_DRQ)
break;
}
if (!msec) {
@ -173,6 +322,42 @@ static irqreturn_t mg_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
/* local copy of ata_id_string() */
static void mg_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned int c;
BUG_ON(len & 1);
while (len > 0) {
c = id[ofs] >> 8;
*s = c;
s++;
c = id[ofs] & 0xff;
*s = c;
s++;
ofs++;
len -= 2;
}
}
/* local copy of ata_id_c_string() */
static void mg_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len)
{
unsigned char *p;
mg_id_string(id, s, ofs, len - 1);
p = s + strnlen(s, len - 1);
while (p > s && p[-1] == ' ')
p--;
*p = '\0';
}
static int mg_get_disk_id(struct mg_host *host)
{
u32 i;
@ -184,12 +369,10 @@ static int mg_get_disk_id(struct mg_host *host)
char serial[ATA_ID_SERNO_LEN + 1];
if (!prv_data->use_polling)
outb(MG_REG_CTRL_INTR_DISABLE,
(unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ);
if (err)
return err;
@ -219,9 +402,9 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors -= host->nres_sectors;
}
ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
printk(KERN_INFO "mg_disk: model: %s\n", model);
printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
printk(KERN_INFO "mg_disk: serial: %s\n", serial);
@ -229,8 +412,7 @@ static int mg_get_disk_id(struct mg_host *host)
host->n_sectors, host->nres_sectors);
if (!prv_data->use_polling)
outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return err;
}
@ -244,7 +426,7 @@ static int mg_disk_init(struct mg_host *host)
/* hdd rst low */
gpio_set_value(host->rst, 0);
err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
@ -255,17 +437,14 @@ static int mg_disk_init(struct mg_host *host)
return err;
/* soft reset on */
outb(MG_REG_CTRL_RESET |
(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
MG_REG_CTRL_INTR_ENABLE),
outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0),
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY);
if (err)
return err;
/* soft reset off */
outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
MG_REG_CTRL_INTR_ENABLE,
outb(prv_data->use_polling ? ATA_NIEN : 0,
(unsigned long)host->dev_base + MG_REG_DRV_CTRL);
err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
if (err)
@ -281,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
static void mg_bad_rw_intr(struct mg_host *host)
{
struct request *req = elv_next_request(host->breq);
if (req != NULL)
if (++req->errors >= MG_MAX_ERRORS ||
host->error == MG_ERR_TIMEOUT)
end_request(req, 0);
if (host->req)
if (++host->req->errors >= MG_MAX_ERRORS ||
host->error == MG_ERR_TIMEOUT)
mg_end_request_cur(host, -EIO);
}
static unsigned int mg_out(struct mg_host *host,
@ -311,7 +489,7 @@ static unsigned int mg_out(struct mg_host *host,
MG_REG_CYL_LOW);
outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
MG_REG_CYL_HIGH);
outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS),
(unsigned long)host->dev_base + MG_REG_DRV_HEAD);
outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
return MG_ERR_NONE;
@ -319,105 +497,77 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read(struct request *req)
{
u32 remains, j;
u32 j;
struct mg_host *host = req->rq_disk->private_data;
remains = req->nr_sectors;
if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
MG_ERR_NONE)
if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
MG_CMD_RD, NULL) != MG_ERR_NONE)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
remains, req->sector, req->buffer);
blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
while (remains) {
if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
do {
u16 *buff = (u16 *)req->buffer;
if (mg_wait(host, ATA_DRQ,
MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
*(u16 *)req->buffer =
inw((unsigned long)host->dev_base +
MG_BUFF_OFFSET + (j << 1));
req->buffer += 2;
}
req->sector++;
req->errors = 0;
remains = --req->nr_sectors;
--req->current_nr_sectors;
if (req->current_nr_sectors <= 0) {
MG_DBG("remain : %d sects\n", remains);
end_request(req, 1);
if (remains > 0)
req = elv_next_request(host->breq);
}
for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
*buff++ = inw((unsigned long)host->dev_base +
MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
}
} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_write(struct request *req)
{
u32 remains, j;
u32 j;
struct mg_host *host = req->rq_disk->private_data;
remains = req->nr_sectors;
if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
MG_ERR_NONE) {
if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
MG_CMD_WR, NULL) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
remains, req->sector, req->buffer);
while (remains) {
if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
do {
u16 *buff = (u16 *)req->buffer;
if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
mg_bad_rw_intr(host);
return;
}
for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
outw(*(u16 *)req->buffer,
(unsigned long)host->dev_base +
MG_BUFF_OFFSET + (j << 1));
req->buffer += 2;
}
req->sector++;
remains = --req->nr_sectors;
--req->current_nr_sectors;
if (req->current_nr_sectors <= 0) {
MG_DBG("remain : %d sects\n", remains);
end_request(req, 1);
if (remains > 0)
req = elv_next_request(host->breq);
}
for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
outw(*buff++, (unsigned long)host->dev_base +
MG_BUFF_OFFSET + (j << 1));
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
}
} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
}
static void mg_read_intr(struct mg_host *host)
{
struct request *req = host->req;
u32 i;
struct request *req;
u16 *buff;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
if (i & MG_REG_STATUS_BIT_BUSY)
if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
if (i & MG_REG_STATUS_BIT_DATA_REQ)
if (i & ATA_DRQ)
goto ok_to_read;
} while (0);
mg_dump_status("mg_read_intr", i, host);
@ -427,60 +577,42 @@ static void mg_read_intr(struct mg_host *host)
ok_to_read:
/* get current segment of request */
req = elv_next_request(host->breq);
buff = (u16 *)req->buffer;
/* read 1 sector */
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
*(u16 *)req->buffer =
inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
(i << 1));
req->buffer += 2;
}
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
(i << 1));
/* manipulate request */
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
req->sector, req->nr_sectors - 1, req->buffer);
req->sector++;
req->errors = 0;
i = --req->nr_sectors;
--req->current_nr_sectors;
/* let know if current segment done */
if (req->current_nr_sectors <= 0)
end_request(req, 1);
/* set handler if read remains */
if (i > 0) {
host->mg_do_intr = mg_read_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
/* goto next request */
if (!i)
if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
/* set handler if read remains */
host->mg_do_intr = mg_read_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
} else /* goto next request */
mg_request(host->breq);
}
static void mg_write_intr(struct mg_host *host)
{
struct request *req = host->req;
u32 i, j;
u16 *buff;
struct request *req;
/* get current segment of request */
req = elv_next_request(host->breq);
bool rem;
/* check status */
do {
i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
if (i & MG_REG_STATUS_BIT_BUSY)
if (i & ATA_BUSY)
break;
if (!MG_READY_OK(i))
break;
if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
goto ok_to_write;
} while (0);
mg_dump_status("mg_write_intr", i, host);
@ -489,18 +621,8 @@ static void mg_write_intr(struct mg_host *host)
return;
ok_to_write:
/* manipulate request */
req->sector++;
i = --req->nr_sectors;
--req->current_nr_sectors;
req->buffer += MG_SECTOR_SIZE;
/* let know if current segment or all done */
if (!i || (req->bio && req->current_nr_sectors <= 0))
end_request(req, 1);
/* write 1 sector and set handler if remains */
if (i > 0) {
if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
/* write 1 sector and set handler if remains */
buff = (u16 *)req->buffer;
for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
outw(*buff, (unsigned long)host->dev_base +
@ -508,7 +630,7 @@ ok_to_write:
buff++;
}
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
req->sector, req->nr_sectors, req->buffer);
blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
@ -516,7 +638,7 @@ ok_to_write:
/* send write confirm */
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
if (!i)
if (!rem)
mg_request(host->breq);
}
@ -524,49 +646,45 @@ void mg_times_out(unsigned long data)
{
struct mg_host *host = (struct mg_host *)data;
char *name;
struct request *req;
spin_lock_irq(&host->lock);
req = elv_next_request(host->breq);
if (!req)
if (!host->req)
goto out_unlock;
host->mg_do_intr = NULL;
name = req->rq_disk->disk_name;
name = host->req->rq_disk->disk_name;
printk(KERN_DEBUG "%s: timeout\n", name);
host->error = MG_ERR_TIMEOUT;
mg_bad_rw_intr(host);
mg_request(host->breq);
out_unlock:
mg_request(host->breq);
spin_unlock_irq(&host->lock);
}
static void mg_request_poll(struct request_queue *q)
{
struct request *req;
struct mg_host *host;
struct mg_host *host = q->queuedata;
while ((req = elv_next_request(q)) != NULL) {
host = req->rq_disk->private_data;
if (blk_fs_request(req)) {
switch (rq_data_dir(req)) {
case READ:
mg_read(req);
while (1) {
if (!host->req) {
host->req = blk_fetch_request(q);
if (!host->req)
break;
case WRITE:
mg_write(req);
break;
default:
printk(KERN_WARNING "%s:%d unknown command\n",
__func__, __LINE__);
end_request(req, 0);
break;
}
}
if (unlikely(!blk_fs_request(host->req))) {
mg_end_request_cur(host, -EIO);
continue;
}
if (rq_data_dir(host->req) == READ)
mg_read(host->req);
else
mg_write(host->req);
}
}
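Several of the converted drivers share one structural change: the request being serviced is no longer re-fetched with elv_next_request() on every pass but cached in the driver (host->req above) until it has been fully completed. A minimal sketch of that shape, with my_host and my_issue() as hypothetical stand-ins for the driver specifics:

	static void my_request_fn(struct request_queue *q)
	{
		struct my_host *host = q->queuedata;	/* set up at probe time */

		while (1) {
			if (!host->req) {
				host->req = blk_fetch_request(q); /* peek + dequeue */
				if (!host->req)
					break;
			}
			if (unlikely(!blk_fs_request(host->req))) {
				/* non-fs requests are failed outright */
				__blk_end_request_all(host->req, -EIO);
				host->req = NULL;
				continue;
			}
			if (my_issue(host, host->req))
				break;	/* completion path clears host->req */
		}
	}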
@ -588,18 +706,15 @@ static unsigned int mg_issue_req(struct request *req,
break;
case WRITE:
/* TODO : handler */
outb(MG_REG_CTRL_INTR_DISABLE,
(unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
!= MG_ERR_NONE) {
mg_bad_rw_intr(host);
return host->error;
}
del_timer(&host->timer);
mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ);
outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
if (host->error) {
mg_bad_rw_intr(host);
return host->error;
@ -614,11 +729,6 @@ static unsigned int mg_issue_req(struct request *req,
outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
MG_REG_COMMAND);
break;
default:
printk(KERN_WARNING "%s:%d unknown command\n",
__func__, __LINE__);
end_request(req, 0);
break;
}
return MG_ERR_NONE;
}
@ -626,16 +736,17 @@ static unsigned int mg_issue_req(struct request *req,
/* This function also called from IRQ context */
static void mg_request(struct request_queue *q)
{
struct mg_host *host = q->queuedata;
struct request *req;
struct mg_host *host;
u32 sect_num, sect_cnt;
while (1) {
req = elv_next_request(q);
if (!req)
return;
host = req->rq_disk->private_data;
if (!host->req) {
host->req = blk_fetch_request(q);
if (!host->req)
break;
}
req = host->req;
/* check unwanted request call */
if (host->mg_do_intr)
@ -643,9 +754,9 @@ static void mg_request(struct request_queue *q)
del_timer(&host->timer);
sect_num = req->sector;
sect_num = blk_rq_pos(req);
/* deal whole segments */
sect_cnt = req->nr_sectors;
sect_cnt = blk_rq_sectors(req);
/* sanity check */
if (sect_num >= get_capacity(req->rq_disk) ||
@ -655,12 +766,14 @@ static void mg_request(struct request_queue *q)
"%s: bad access: sector=%d, count=%d\n",
req->rq_disk->disk_name,
sect_num, sect_cnt);
end_request(req, 0);
mg_end_request_cur(host, -EIO);
continue;
}
if (!blk_fs_request(req))
return;
if (unlikely(!blk_fs_request(req))) {
mg_end_request_cur(host, -EIO);
continue;
}
if (!mg_issue_req(req, host, sect_num, sect_cnt))
return;
@ -690,9 +803,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return -EIO;
if (!prv_data->use_polling)
outb(MG_REG_CTRL_INTR_DISABLE,
(unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
/* wait until mflash deep sleep */
@ -700,9 +811,7 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
if (!prv_data->use_polling)
outb(MG_REG_CTRL_INTR_ENABLE,
(unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return -EIO;
}
@ -725,8 +834,7 @@ static int mg_resume(struct platform_device *plat_dev)
return -EIO;
if (!prv_data->use_polling)
outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
MG_REG_DRV_CTRL);
outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
return 0;
}
@ -877,6 +985,7 @@ static int mg_probe(struct platform_device *plat_dev)
__func__, __LINE__);
goto probe_err_5;
}
host->breq->queuedata = host;
/* mflash is a random device, thanks for the noop */
elevator_exit(host->breq->elevator);
@ -887,7 +996,7 @@ static int mg_probe(struct platform_device *plat_dev)
goto probe_err_6;
}
blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE);
init_timer(&host->timer);
host->timer.function = mg_times_out;


@ -110,7 +110,7 @@ static void nbd_end_request(struct request *req)
req, error ? "failed" : "done");
spin_lock_irqsave(q->queue_lock, flags);
__blk_end_request(req, error, req->nr_sectors << 9);
__blk_end_request_all(req, error);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@ -231,19 +231,19 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
int result, flags;
struct nbd_request request;
unsigned long size = req->nr_sectors << 9;
unsigned long size = blk_rq_bytes(req);
request.magic = htonl(NBD_REQUEST_MAGIC);
request.type = htonl(nbd_cmd(req));
request.from = cpu_to_be64((u64) req->sector << 9);
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
memcpy(request.handle, &req, sizeof(req));
dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
lo->disk->disk_name, req,
nbdcmd_to_ascii(nbd_cmd(req)),
(unsigned long long)req->sector << 9,
req->nr_sectors << 9);
(unsigned long long)blk_rq_pos(req) << 9,
blk_rq_bytes(req));
result = sock_xmit(lo, 1, &request, sizeof(request),
(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
if (result <= 0) {
@ -533,11 +533,9 @@ static void do_nbd_request(struct request_queue *q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
while ((req = blk_fetch_request(q)) != NULL) {
struct nbd_device *lo;
blkdev_dequeue_request(req);
spin_unlock_irq(q->queue_lock);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
@ -580,13 +578,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
blk_rq_init(NULL, &sreq);
sreq.cmd_type = REQ_TYPE_SPECIAL;
nbd_cmd(&sreq) = NBD_CMD_DISC;
/*
* Set these to sane values in case server implementation
* fails to check the request type first and also to keep
* debugging output cleaner.
*/
sreq.sector = 0;
sreq.nr_sectors = 0;
if (!lo->sock)
return -EINVAL;
nbd_send_req(lo, &sreq);


@ -719,32 +719,37 @@ static void do_pcd_request(struct request_queue * q)
if (pcd_busy)
return;
while (1) {
pcd_req = elv_next_request(q);
if (!pcd_req)
return;
if (!pcd_req) {
pcd_req = blk_fetch_request(q);
if (!pcd_req)
return;
}
if (rq_data_dir(pcd_req) == READ) {
struct pcd_unit *cd = pcd_req->rq_disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
pcd_sector = pcd_req->sector;
pcd_count = pcd_req->current_nr_sectors;
pcd_sector = blk_rq_pos(pcd_req);
pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = pcd_req->buffer;
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
} else
end_request(pcd_req, 0);
} else {
__blk_end_request_all(pcd_req, -EIO);
pcd_req = NULL;
}
}
}
static inline void next_request(int success)
static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
end_request(pcd_req, success);
if (!__blk_end_request_cur(pcd_req, err))
pcd_req = NULL;
pcd_busy = 0;
do_pcd_request(pcd_queue);
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@ -781,7 +786,7 @@ static void pcd_start(void)
if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
pcd_bufblk = -1;
next_request(0);
next_request(-EIO);
return;
}
@ -796,7 +801,7 @@ static void do_pcd_read(void)
pcd_retries = 0;
pcd_transfer();
if (!pcd_count) {
next_request(1);
next_request(0);
return;
}
@ -815,7 +820,7 @@ static void do_pcd_read_drq(void)
return;
}
pcd_bufblk = -1;
next_request(0);
next_request(-EIO);
return;
}


@ -410,10 +410,12 @@ static void run_fsm(void)
pd_claimed = 0;
phase = NULL;
spin_lock_irqsave(&pd_lock, saved_flags);
end_request(pd_req, res);
pd_req = elv_next_request(pd_queue);
if (!pd_req)
stop = 1;
if (!__blk_end_request_cur(pd_req,
res == Ok ? 0 : -EIO)) {
pd_req = blk_fetch_request(pd_queue);
if (!pd_req)
stop = 1;
}
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
return;
@ -443,11 +445,11 @@ static enum action do_pd_io_start(void)
pd_cmd = rq_data_dir(pd_req);
if (pd_cmd == READ || pd_cmd == WRITE) {
pd_block = pd_req->sector;
pd_count = pd_req->current_nr_sectors;
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
pd_run = pd_req->nr_sectors;
pd_run = blk_rq_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_retries = 0;
if (pd_cmd == READ)
@ -477,8 +479,8 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
end_request(pd_req, 1);
pd_count = pd_req->current_nr_sectors;
__blk_end_request_cur(pd_req, 0);
pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = pd_req->buffer;
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
@ -702,7 +704,7 @@ static void do_pd_request(struct request_queue * q)
{
if (pd_req)
return;
pd_req = elv_next_request(q);
pd_req = blk_fetch_request(q);
if (!pd_req)
return;


@ -750,12 +750,10 @@ static int pf_ready(void)
static struct request_queue *pf_queue;
static void pf_end_request(int uptodate)
static void pf_end_request(int err)
{
if (pf_req) {
end_request(pf_req, uptodate);
if (pf_req && !__blk_end_request_cur(pf_req, err))
pf_req = NULL;
}
}
static void do_pf_request(struct request_queue * q)
@ -763,17 +761,19 @@ static void do_pf_request(struct request_queue * q)
if (pf_busy)
return;
repeat:
pf_req = elv_next_request(q);
if (!pf_req)
return;
if (!pf_req) {
pf_req = blk_fetch_request(q);
if (!pf_req)
return;
}
pf_current = pf_req->rq_disk->private_data;
pf_block = pf_req->sector;
pf_run = pf_req->nr_sectors;
pf_count = pf_req->current_nr_sectors;
pf_block = blk_rq_pos(pf_req);
pf_run = blk_rq_sectors(pf_req);
pf_count = blk_rq_cur_sectors(pf_req);
if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
pf_end_request(0);
pf_end_request(-EIO);
goto repeat;
}
@ -788,7 +788,7 @@ repeat:
pi_do_claimed(pf_current->pi, do_pf_write);
else {
pf_busy = 0;
pf_end_request(0);
pf_end_request(-EIO);
goto repeat;
}
}
@ -805,23 +805,22 @@ static int pf_next_buf(void)
return 1;
if (!pf_count) {
spin_lock_irqsave(&pf_spin_lock, saved_flags);
pf_end_request(1);
pf_req = elv_next_request(pf_queue);
pf_end_request(0);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
if (!pf_req)
return 1;
pf_count = pf_req->current_nr_sectors;
pf_count = blk_rq_cur_sectors(pf_req);
pf_buf = pf_req->buffer;
}
return 0;
}
static inline void next_request(int success)
static inline void next_request(int err)
{
unsigned long saved_flags;
spin_lock_irqsave(&pf_spin_lock, saved_flags);
pf_end_request(success);
pf_end_request(err);
pf_busy = 0;
do_pf_request(pf_queue);
spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
@ -844,7 +843,7 @@ static void do_pf_read_start(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
next_request(0);
next_request(-EIO);
return;
}
pf_mask = STAT_DRQ;
@ -863,7 +862,7 @@ static void do_pf_read_drq(void)
pi_do_claimed(pf_current->pi, do_pf_read_start);
return;
}
next_request(0);
next_request(-EIO);
return;
}
pi_read_block(pf_current->pi, pf_buf, 512);
@ -871,7 +870,7 @@ static void do_pf_read_drq(void)
break;
}
pi_disconnect(pf_current->pi);
next_request(1);
next_request(0);
}
static void do_pf_write(void)
@ -890,7 +889,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
next_request(0);
next_request(-EIO);
return;
}
@ -903,7 +902,7 @@ static void do_pf_write_start(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
next_request(0);
next_request(-EIO);
return;
}
pi_write_block(pf_current->pi, pf_buf, 512);
@ -923,11 +922,11 @@ static void do_pf_write_done(void)
pi_do_claimed(pf_current->pi, do_pf_write_start);
return;
}
next_request(0);
next_request(-EIO);
return;
}
pi_disconnect(pf_current->pi);
next_request(1);
next_request(0);
}
static int __init pf_init(void)


@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
if ((pd->settings.size << 9) / CD_FRAMESIZE
<= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
} else if ((pd->settings.size << 9) / PAGE_SIZE
<= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
@ -2657,7 +2659,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
struct request_queue *q = pd->disk->queue;
blk_queue_make_request(q, pkt_make_request);
blk_queue_hardsect_size(q, CD_FRAMESIZE);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
blk_queue_merge_bvec(q, pkt_merge_bvec);
q->queuedata = pd;


@ -134,13 +134,12 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
rq_for_each_segment(bv, req, iter)
n++;
dev_dbg(&dev->sbd.core,
"%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
__func__, __LINE__, op, n, req->nr_sectors,
req->hard_nr_sectors);
"%s:%u: %s req has %u bvecs for %u sectors\n",
__func__, __LINE__, op, n, blk_rq_sectors(req));
#endif
start_sector = req->sector * priv->blocking_factor;
sectors = req->nr_sectors * priv->blocking_factor;
start_sector = blk_rq_pos(req) * priv->blocking_factor;
sectors = blk_rq_sectors(req) * priv->blocking_factor;
dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
__func__, __LINE__, op, sectors, start_sector);
@ -158,7 +157,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
end_request(req, 0);
__blk_end_request_all(req, -EIO);
return 0;
}
@ -180,7 +179,7 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
end_request(req, 0);
__blk_end_request_all(req, -EIO);
return 0;
}
@ -195,7 +194,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = elv_next_request(q))) {
while ((req = blk_fetch_request(q))) {
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
@ -205,7 +204,7 @@ static void ps3disk_do_request(struct ps3_storage_device *dev,
break;
} else {
blk_dump_rq_flags(req, DEVICE_NAME " bad request");
end_request(req, 0);
__blk_end_request_all(req, -EIO);
continue;
}
}
@ -231,7 +230,6 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
struct request *req;
int res, read, error;
u64 tag, status;
unsigned long num_sectors;
const char *op;
res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status);
@ -261,11 +259,9 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_FLUSH) {
read = 0;
num_sectors = req->hard_cur_sectors;
op = "flush";
} else {
read = !rq_data_dir(req);
num_sectors = req->nr_sectors;
op = read ? "read" : "write";
}
if (status) {
@ -281,7 +277,7 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
__blk_end_request(req, error, num_sectors << 9);
__blk_end_request_all(req, error);
priv->req = NULL;
ps3disk_do_request(dev, priv->queue);
spin_unlock(&priv->lock);
@ -481,7 +477,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev)
blk_queue_max_sectors(queue, dev->bounce_size >> 9);
blk_queue_segment_boundary(queue, -1UL);
blk_queue_dma_alignment(queue, dev->blk_size-1);
blk_queue_hardsect_size(queue, dev->blk_size);
blk_queue_logical_block_size(queue, dev->blk_size);
blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH,
ps3disk_prepare_flush);


@ -212,11 +212,6 @@ static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
static void vdc_end_request(struct request *req, int error, int num_sectors)
{
__blk_end_request(req, error, num_sectors << 9);
}
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
unsigned int index)
{
@ -239,7 +234,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9);
__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
if (blk_queue_stopped(port->disk->queue))
blk_start_queue(port->disk->queue);
@ -421,7 +416,7 @@ static int __send_request(struct request *req)
desc->slice = 0;
}
desc->status = ~0;
desc->offset = (req->sector << 9) / port->vdisk_block_size;
desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
desc->size = len;
desc->ncookies = err;
@ -446,14 +441,13 @@ out:
static void do_vdc_request(struct request_queue *q)
{
while (1) {
struct request *req = elv_next_request(q);
struct request *req = blk_fetch_request(q);
if (!req)
break;
blkdev_dequeue_request(req);
if (__send_request(req) < 0)
vdc_end_request(req, -EIO, req->hard_nr_sectors);
__blk_end_request_all(req, -EIO);
}
}


@ -514,7 +514,7 @@ static int floppy_read_sectors(struct floppy_state *fs,
ret = swim_read_sector(fs, side, track, sector,
buffer);
if (try-- == 0)
return -1;
return -EIO;
} while (ret != 512);
buffer += ret;
@ -528,45 +528,31 @@ static void redo_fd_request(struct request_queue *q)
struct request *req;
struct floppy_state *fs;
while ((req = elv_next_request(q))) {
req = blk_fetch_request(q);
while (req) {
int err = -EIO;
fs = req->rq_disk->private_data;
if (req->sector < 0 || req->sector >= fs->total_secs) {
end_request(req, 0);
continue;
}
if (req->current_nr_sectors == 0) {
end_request(req, 1);
continue;
}
if (!fs->disk_in) {
end_request(req, 0);
continue;
}
if (rq_data_dir(req) == WRITE) {
if (fs->write_protected) {
end_request(req, 0);
continue;
}
}
if (blk_rq_pos(req) >= fs->total_secs)
goto done;
if (!fs->disk_in)
goto done;
if (rq_data_dir(req) == WRITE && fs->write_protected)
goto done;
switch (rq_data_dir(req)) {
case WRITE:
/* NOT IMPLEMENTED */
end_request(req, 0);
break;
case READ:
if (floppy_read_sectors(fs, req->sector,
req->current_nr_sectors,
req->buffer)) {
end_request(req, 0);
continue;
}
req->nr_sectors -= req->current_nr_sectors;
req->sector += req->current_nr_sectors;
req->buffer += req->current_nr_sectors * 512;
end_request(req, 1);
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
req->buffer);
break;
}
done:
if (!__blk_end_request_cur(req, err))
req = blk_fetch_request(q);
}
}


@ -251,6 +251,20 @@ static int floppy_release(struct gendisk *disk, fmode_t mode);
static int floppy_check_change(struct gendisk *disk);
static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(int err, unsigned int nr_bytes)
{
if (__blk_end_request(fd_req, err, nr_bytes))
return true;
fd_req = NULL;
return false;
}
static bool swim3_end_request_cur(int err)
{
return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
}
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
@ -310,25 +324,27 @@ static void start_request(struct floppy_state *fs)
wake_up(&fs->wait);
return;
}
while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
while (fs->state == idle) {
if (!fd_req) {
fd_req = blk_fetch_request(swim3_queue);
if (!fd_req)
break;
}
req = fd_req;
#if 0
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)req->sector, req->nr_sectors, req->buffer);
printk(" errors=%d current_nr_sectors=%ld\n",
req->errors, req->current_nr_sectors);
(long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
printk(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
#endif
if (req->sector < 0 || req->sector >= fs->total_secs) {
end_request(req, 0);
continue;
}
if (req->current_nr_sectors == 0) {
end_request(req, 1);
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_end_request_cur(-EIO);
continue;
}
if (fs->ejected) {
end_request(req, 0);
swim3_end_request_cur(-EIO);
continue;
}
@ -336,18 +352,19 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
end_request(req, 0);
swim3_end_request_cur(-EIO);
continue;
}
}
/* Do not remove the cast. req->sector is now a sector_t and
* can be 64 bits, but it will never go past 32 bits for this
* driver anyway, so we can safely cast it down and not have
* to do a 64/32 division
/* Do not remove the cast. blk_rq_pos(req) is now a
* sector_t and can be 64 bits, but it will never go
* past 32 bits for this driver anyway, so we can
* safely cast it down and not have to do a 64/32
* division
*/
fs->req_cyl = ((long)req->sector) / fs->secpercyl;
x = ((long)req->sector) % fs->secpercyl;
fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
@ -424,7 +441,7 @@ static inline void setup_transfer(struct floppy_state *fs)
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
if (fd_req->current_nr_sectors <= 0) {
if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
return;
}
@ -432,8 +449,8 @@ static inline void setup_transfer(struct floppy_state *fs)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
if (n > fd_req->current_nr_sectors)
n = fd_req->current_nr_sectors;
if (n > blk_rq_cur_sectors(fd_req))
n = blk_rq_cur_sectors(fd_req);
}
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
@ -508,7 +525,7 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
return;
}
@ -540,7 +557,7 @@ static void scan_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@ -559,7 +576,7 @@ static void seek_timeout(unsigned long data)
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@ -583,7 +600,7 @@ static void settle_timeout(unsigned long data)
return;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@ -593,8 +610,6 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
struct dbdma_cmd *cp = fs->dma_cmd;
unsigned long s;
int n;
fs->timeout_pending = 0;
@ -605,17 +620,10 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
if (rq_data_dir(fd_req) == WRITE)
++cp;
if (ld_le16(&cp->xfer_status) != 0)
s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
else
s = 0;
fd_req->sector += s;
fd_req->current_nr_sectors -= s;
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
(rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
end_request(fd_req, 0);
(rq_data_dir(fd_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fd_req));
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
}
@ -646,7 +654,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
} else {
@ -719,9 +727,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
fd_req->sector += n;
fd_req->current_nr_sectors -= n;
fd_req->buffer += n * 512;
blk_update_request(fd_req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
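The retry path above leans on blk_update_request(): it folds a partial transfer into the request's bio and byte accounting without ending the request, which is what replaces the hand-rolled sector/current_nr_sectors/buffer arithmetic in the old code. A rough sketch of the call as used here:

	/* n sectors made it across before the error: account for them and
	 * retry only the remainder; the helper returns true while part of
	 * the request is still outstanding */
	blk_update_request(fd_req, 0 /* those bytes succeeded */, n << 9);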
@ -730,8 +736,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
(long)fd_req->sector, err);
end_request(fd_req, 0);
(long)blk_rq_pos(fd_req), err);
swim3_end_request_cur(-EIO);
fs->state = idle;
}
} else {
@ -740,18 +746,12 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
end_request(fd_req, 0);
swim3_end_request_cur(-EIO);
fs->state = idle;
start_request(fs);
break;
}
fd_req->sector += fs->scount;
fd_req->current_nr_sectors -= fs->scount;
fd_req->buffer += fs->scount * 512;
if (fd_req->current_nr_sectors <= 0) {
end_request(fd_req, 1);
fs->state = idle;
} else {
if (swim3_end_request(0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@ -761,7 +761,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
}
}
act(fs);
}
} else
fs->state = idle;
}
if (fs->state == idle)
start_request(fs);


@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
rc = __blk_end_request(req, error, blk_rq_bytes(req));
assert(rc == 0);
__blk_end_request_all(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@ -811,12 +810,10 @@ static void carm_oob_rq_fn(struct request_queue *q)
while (1) {
DPRINTK("get req\n");
rq = elv_next_request(q);
rq = blk_fetch_request(q);
if (!rq)
break;
blkdev_dequeue_request(rq);
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
@ -847,7 +844,7 @@ static void carm_rq_fn(struct request_queue *q)
queue_one_request:
VPRINTK("get req\n");
rq = elv_next_request(q);
rq = blk_peek_request(q);
if (!rq)
return;
@ -858,7 +855,7 @@ queue_one_request:
}
crq->rq = rq;
blkdev_dequeue_request(rq);
blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
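Two queue-draining idioms appear across these drivers, and the conversions treat them as equivalent: fetch in one call, or peek and start explicitly when the driver may decide not to take the request yet (as this driver does). A sketch of the assumed equivalence:

	struct request *rq;

	rq = blk_fetch_request(q);		/* one call: peek + start */

	/* ...which is taken to behave like the explicit form: */
	rq = blk_peek_request(q);		/* replaces elv_next_request(): look, leave queued */
	if (rq)
		blk_start_request(rq);		/* replaces blkdev_dequeue_request() */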
@ -904,10 +901,10 @@ queue_one_request:
msg->sg_count = n_elem;
msg->sg_type = SGT_32BIT;
msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
msg->lba = cpu_to_le32(rq->sector & 0xffffffff);
tmp = (rq->sector >> 16) >> 16;
msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
tmp = (blk_rq_pos(rq) >> 16) >> 16;
msg->lba_high = cpu_to_le16( (u16) tmp );
msg->lba_count = cpu_to_le16(rq->nr_sectors);
msg->lba_count = cpu_to_le16(blk_rq_sectors(rq));
msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
for (i = 0; i < n_elem; i++) {


@ -360,8 +360,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status,
unsigned int cmd_len);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
@ -627,7 +626,7 @@ static void ub_request_fn(struct request_queue *q)
struct ub_lun *lun = q->queuedata;
struct request *rq;
while ((rq = elv_next_request(q)) != NULL) {
while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
@ -643,14 +642,14 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
int n_elem;
if (atomic_read(&sc->poison)) {
blkdev_dequeue_request(rq);
ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
blk_start_request(rq);
ub_end_rq(rq, DID_NO_CONNECT << 16);
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
blkdev_dequeue_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
blk_start_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
return 0;
}
@ -660,7 +659,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
blkdev_dequeue_request(rq);
blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
@ -702,7 +701,7 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
drop:
ub_put_cmd(lun, cmd);
ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
ub_end_rq(rq, DID_ERROR << 16);
return 0;
}
@ -723,11 +722,11 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
/*
* build the command
*
* The call to blk_queue_hardsect_size() guarantees that request
* The call to blk_queue_logical_block_size() guarantees that request
* is aligned, but it is given in terms of 512 byte units, always.
*/
block = rq->sector >> lun->capacity.bshift;
nblks = rq->nr_sectors >> lun->capacity.bshift;
block = blk_rq_pos(rq) >> lun->capacity.bshift;
nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
@ -739,7 +738,7 @@ static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
cmd->cdb[8] = nblks;
cmd->cdb_len = 10;
cmd->len = rq->nr_sectors * 512;
cmd->len = blk_rq_bytes(rq);
}
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
@ -747,7 +746,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
{
struct request *rq = urq->rq;
if (rq->data_len == 0) {
if (blk_rq_bytes(rq) == 0) {
cmd->dir = UB_DIR_NONE;
} else {
if (rq_data_dir(rq) == WRITE)
@ -762,7 +761,7 @@ static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
cmd->cdb_len = rq->cmd_len;
cmd->len = rq->data_len;
cmd->len = blk_rq_bytes(rq);
/*
* To reapply this to every URB is not as incorrect as it looks.
@ -777,16 +776,15 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
struct ub_request *urq = cmd->back;
struct request *rq;
unsigned int scsi_status;
unsigned int cmd_len;
rq = urq->rq;
if (cmd->error == 0) {
if (blk_pc_request(rq)) {
if (cmd->act_len >= rq->data_len)
rq->data_len = 0;
if (cmd->act_len >= rq->resid_len)
rq->resid_len = 0;
else
rq->data_len -= cmd->act_len;
rq->resid_len -= cmd->act_len;
scsi_status = 0;
} else {
if (cmd->act_len != cmd->len) {
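For packet (SCSI passthrough) requests the leftover byte count is now reported through rq->resid_len instead of shrinking rq->data_len. As a rough sketch of the intent, with act_len standing in for the bytes that actually transferred:

	/* residue = bytes requested minus bytes that made it across */
	rq->resid_len = blk_rq_bytes(rq) -
			min_t(unsigned int, act_len, blk_rq_bytes(rq));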
@ -818,17 +816,14 @@ static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
urq->rq = NULL;
cmd_len = cmd->len;
ub_put_cmd(lun, cmd);
ub_end_rq(rq, scsi_status, cmd_len);
ub_end_rq(rq, scsi_status);
blk_start_queue(lun->disk->queue);
}
static void ub_end_rq(struct request *rq, unsigned int scsi_status,
unsigned int cmd_len)
static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
int error;
long rqlen;
if (scsi_status == 0) {
error = 0;
@ -836,12 +831,7 @@ static void ub_end_rq(struct request *rq, unsigned int scsi_status,
error = -EIO;
rq->errors = scsi_status;
}
rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */
if (__blk_end_request(rq, error, cmd_len)) {
printk(KERN_WARNING DRV_NAME
": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
}
__blk_end_request_all(rq, error);
}
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
@ -1759,7 +1749,7 @@ static int ub_bd_revalidate(struct gendisk *disk)
ub_revalidate(lun->udev, lun);
/* XXX Support sector size switching like in sr.c */
blk_queue_hardsect_size(disk->queue, lun->capacity.bsize);
blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
set_capacity(disk, lun->capacity.nsec);
// set_disk_ro(sdkp->disk, lun->readonly);
@ -2334,7 +2324,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */
blk_queue_max_sectors(q, UB_MAX_SECTORS);
blk_queue_hardsect_size(q, lun->capacity.bsize);
blk_queue_logical_block_size(q, lun->capacity.bsize);
lun->disk = disk;
q->queuedata = lun;


@ -252,7 +252,7 @@ static int send_request(struct request *req)
struct viodasd_device *d;
unsigned long flags;
start = (u64)req->sector << 9;
start = (u64)blk_rq_pos(req) << 9;
if (rq_data_dir(req) == READ) {
direction = DMA_FROM_DEVICE;
@ -361,19 +361,17 @@ static void do_viodasd_request(struct request_queue *q)
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
req = elv_next_request(q);
req = blk_fetch_request(q);
if (req == NULL)
return;
/* dequeue the current request from the queue */
blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
viodasd_end_request(req, -EIO, req->hard_nr_sectors);
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
continue;
}
/* Try sending the request */
if (send_request(req) != 0)
viodasd_end_request(req, -EIO, req->hard_nr_sectors);
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
}
}
@ -590,7 +588,7 @@ static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
event->xRc, bevent->sub_result, err->msg);
num_sect = req->hard_nr_sectors;
num_sect = blk_rq_sectors(req);
}
qlock = req->q->queue_lock;
spin_lock_irqsave(qlock, irq_flags);


@ -37,6 +37,7 @@ struct virtblk_req
struct list_head list;
struct request *req;
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
u8 status;
};
@ -50,6 +51,7 @@ static void blk_done(struct virtqueue *vq)
spin_lock_irqsave(&vblk->lock, flags);
while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
int error;
switch (vbr->status) {
case VIRTIO_BLK_S_OK:
error = 0;
@ -62,7 +64,13 @@ static void blk_done(struct virtqueue *vq)
break;
}
__blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
if (blk_pc_request(vbr->req)) {
vbr->req->resid_len = vbr->in_hdr.residual;
vbr->req->sense_len = vbr->in_hdr.sense_len;
vbr->req->errors = vbr->in_hdr.errors;
}
__blk_end_request_all(vbr->req, error);
list_del(&vbr->list);
mempool_free(vbr, vblk->pool);
}
@ -74,7 +82,7 @@ static void blk_done(struct virtqueue *vq)
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
struct request *req)
{
unsigned long num, out, in;
unsigned long num, out = 0, in = 0;
struct virtblk_req *vbr;
vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
@ -85,7 +93,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
vbr->req = req;
if (blk_fs_request(vbr->req)) {
vbr->out_hdr.type = 0;
vbr->out_hdr.sector = vbr->req->sector;
vbr->out_hdr.sector = blk_rq_pos(vbr->req);
vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
} else if (blk_pc_request(vbr->req)) {
vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
@ -99,18 +107,36 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
if (blk_barrier_rq(vbr->req))
vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;
sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status));
sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
if (rq_data_dir(vbr->req) == WRITE) {
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
out = 1 + num;
in = 1;
} else {
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
out = 1;
in = 1 + num;
/*
* If this is a packet command we need a couple of additional headers.
* Behind the normal outhdr we put a segment with the scsi command
* block, and on the return path the sense data and the inhdr with the
* additional status information go in front of the final status byte.
*/
if (blk_pc_request(vbr->req))
sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);
num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);
if (blk_pc_request(vbr->req)) {
sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
sizeof(vbr->in_hdr));
}
sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
sizeof(vbr->status));
if (num) {
if (rq_data_dir(vbr->req) == WRITE) {
vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
out += num;
} else {
vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
in += num;
}
}
if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
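To make the comment above concrete, the scatterlist that do_req() builds for a SCSI passthrough read ends up laid out as sketched below (for a write, the data segments move to the out side). This is a reading of the code, not text from the patch:

	/*
	 *  out (driver -> device):  [ out_hdr ][ scsi command block ]
	 *  in  (device -> driver):  [ data ... ][ sense, 96 bytes ][ in_hdr ][ status ]
	 */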
@ -124,12 +150,11 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
static void do_virtblk_request(struct request_queue *q)
{
struct virtio_blk *vblk = NULL;
struct virtio_blk *vblk = q->queuedata;
struct request *req;
unsigned int issued = 0;
while ((req = elv_next_request(q)) != NULL) {
vblk = req->rq_disk->private_data;
while ((req = blk_peek_request(q)) != NULL) {
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
/* If this request fails, stop queue and wait for something to
@ -138,7 +163,7 @@ static void do_virtblk_request(struct request_queue *q)
blk_stop_queue(q);
break;
}
blkdev_dequeue_request(req);
blk_start_request(req);
issued++;
}
@ -146,12 +171,51 @@ static void do_virtblk_request(struct request_queue *q)
vblk->vq->vq_ops->kick(vblk->vq);
}
/* return ATA identify data
*/
static int virtblk_identify(struct gendisk *disk, void *argp)
{
struct virtio_blk *vblk = disk->private_data;
void *opaque;
int err = -ENOMEM;
opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
if (!opaque)
goto out;
err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY,
offsetof(struct virtio_blk_config, identify), opaque,
VIRTIO_BLK_ID_BYTES);
if (err)
goto out_kfree;
if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES))
err = -EFAULT;
out_kfree:
kfree(opaque);
out:
return err;
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned cmd, unsigned long data)
{
return scsi_cmd_ioctl(bdev->bd_disk->queue,
bdev->bd_disk, mode, cmd,
(void __user *)data);
struct gendisk *disk = bdev->bd_disk;
struct virtio_blk *vblk = disk->private_data;
void __user *argp = (void __user *)data;
if (cmd == HDIO_GET_IDENTITY)
return virtblk_identify(disk, argp);
/*
* Only allow the generic SCSI ioctls if the host can support it.
*/
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOIOCTLCMD;
return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@ -249,6 +313,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
}
vblk->disk->queue->queuedata = vblk;
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
if (index < 26) {
@ -313,7 +378,7 @@ static int virtblk_probe(struct virtio_device *vdev)
offsetof(struct virtio_blk_config, blk_size),
&blk_size);
if (!err)
blk_queue_hardsect_size(vblk->disk->queue, blk_size);
blk_queue_logical_block_size(vblk->disk->queue, blk_size);
add_disk(vblk->disk);
return 0;
@ -356,6 +421,7 @@ static struct virtio_device_id id_table[] = {
static unsigned int features[] = {
VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY
};
static struct virtio_driver virtio_blk = {


@ -305,30 +305,25 @@ static void do_xd_request (struct request_queue * q)
if (xdc_busy)
return;
while ((req = elv_next_request(q)) != NULL) {
unsigned block = req->sector;
unsigned count = req->nr_sectors;
int rw = rq_data_dir(req);
req = blk_fetch_request(q);
while (req) {
unsigned block = blk_rq_pos(req);
unsigned count = blk_rq_cur_sectors(req);
XD_INFO *disk = req->rq_disk->private_data;
int res = 0;
int res = -EIO;
int retry;
if (!blk_fs_request(req)) {
end_request(req, 0);
continue;
}
if (block + count > get_capacity(req->rq_disk)) {
end_request(req, 0);
continue;
}
if (rw != READ && rw != WRITE) {
printk("do_xd_request: unknown request\n");
end_request(req, 0);
continue;
}
if (!blk_fs_request(req))
goto done;
if (block + count > get_capacity(req->rq_disk))
goto done;
for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
res = xd_readwrite(rw, disk, req->buffer, block, count);
end_request(req, res); /* wrap up, 0 = fail, 1 = success */
res = xd_readwrite(rq_data_dir(req), disk, req->buffer,
block, count);
done:
/* wrap up, 0 = success, -errno = fail */
if (!__blk_end_request_cur(req, res))
req = blk_fetch_request(q);
}
}
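The xd conversion above is the plainest instance of the simplest request-function shape in this series: fetch a request, service one segment per pass, and fetch the next request only once __blk_end_request_cur() reports the current one finished. A condensed sketch, with my_transfer() as a hypothetical helper returning 0 or -errno:

	static void my_request_fn(struct request_queue *q)
	{
		struct request *req = blk_fetch_request(q);

		while (req) {
			int err = -EIO;

			if (blk_fs_request(req) &&
			    blk_rq_pos(req) + blk_rq_cur_sectors(req) <=
					get_capacity(req->rq_disk))
				err = my_transfer(req->rq_disk->private_data,
						  blk_rq_pos(req),
						  blk_rq_cur_sectors(req),
						  req->buffer,
						  rq_data_dir(req));

			/* completes blk_rq_cur_bytes(); false when nothing is left */
			if (!__blk_end_request_cur(req, err))
				req = blk_fetch_request(q);
		}
	}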
@ -418,7 +413,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
xd_recalibrate(drive);
spin_lock_irq(&xd_lock);
return (0);
return -EIO;
case 2:
if (sense[0] & 0x30) {
printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
@ -439,7 +434,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
else
printk(" - no valid disk address\n");
spin_lock_irq(&xd_lock);
return (0);
return -EIO;
}
if (xd_dma_buffer)
for (i=0; i < (temp * 0x200); i++)
@ -448,7 +443,7 @@ static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_
count -= temp, buffer += temp * 0x200, block += temp;
}
spin_lock_irq(&xd_lock);
return (1);
return 0;
}
/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */


@ -122,7 +122,7 @@ static DEFINE_SPINLOCK(blkif_io_lock);
static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
BUG_ON(free > BLK_RING_SIZE);
BUG_ON(free >= BLK_RING_SIZE);
info->shadow_free = info->shadow[free].req.id;
info->shadow[free].req.id = 0x0fffffee; /* debug */
return free;
@ -231,7 +231,7 @@ static int blkif_queue_request(struct request *req)
info->shadow[id].request = (unsigned long)req;
ring_req->id = id;
ring_req->sector_number = (blkif_sector_t)req->sector;
ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
ring_req->handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
@ -299,25 +299,25 @@ static void do_blkif_request(struct request_queue *rq)
queued = 0;
while ((req = elv_next_request(rq)) != NULL) {
while ((req = blk_peek_request(rq)) != NULL) {
info = req->rq_disk->private_data;
if (!blk_fs_request(req)) {
end_request(req, 0);
continue;
}
if (RING_FULL(&info->ring))
goto wait;
blk_start_request(req);
if (!blk_fs_request(req)) {
__blk_end_request_all(req, -EIO);
continue;
}
pr_debug("do_blk_req %p: cmd %p, sec %lx, "
"(%u/%li) buffer:%p [%s]\n",
req, req->cmd, (unsigned long)req->sector,
req->current_nr_sectors,
req->nr_sectors, req->buffer,
rq_data_dir(req) ? "write" : "read");
"(%u/%u) buffer:%p [%s]\n",
req, req->cmd, (unsigned long)blk_rq_pos(req),
blk_rq_cur_sectors(req), blk_rq_sectors(req),
req->buffer, rq_data_dir(req) ? "write" : "read");
blkdev_dequeue_request(req);
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
wait:
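One subtlety in the hunk above: a request can only be requeued after it has been started (taken off the queue), which is why blk_start_request() now precedes the failure path. A sketch of that shape, with my_cannot_issue() as a stand-in for the ring-full/error check:

	blk_start_request(req);			/* dequeue it */
	if (my_cannot_issue(req)) {
		blk_requeue_request(q, req);	/* put it back at the head */
		blk_stop_queue(q);		/* resume once there is room */
		return;
	}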
@ -344,7 +344,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
/* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_hardsect_size(rq, sector_size);
blk_queue_logical_block_size(rq, sector_size);
blk_queue_max_sectors(rq, 512);
/* Each segment in a request is up to an aligned page in size. */
@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
for (i = info->ring.rsp_cons; i != rp; i++) {
unsigned long id;
int ret;
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
ret = __blk_end_request(req, error, blk_rq_bytes(req));
BUG_ON(ret);
__blk_end_request_all(req, error);
break;
default:
BUG();


@ -463,10 +463,11 @@ struct request *ace_get_next_request(struct request_queue * q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
while ((req = blk_peek_request(q)) != NULL) {
if (blk_fs_request(req))
break;
end_request(req, 0);
blk_start_request(req);
__blk_end_request_all(req, -EIO);
}
return req;
}
@ -492,9 +493,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
set_capacity(ace->gd, 0);
dev_info(ace->dev, "No CF in slot\n");
/* Drop all pending requests */
while ((req = elv_next_request(ace->queue)) != NULL)
end_request(req, 0);
/* Drop all in-flight and pending requests */
if (ace->req) {
__blk_end_request_all(ace->req, -EIO);
ace->req = NULL;
}
while ((req = blk_fetch_request(ace->queue)) != NULL)
__blk_end_request_all(req, -EIO);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@ -642,19 +647,21 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
"request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n",
(unsigned long long) req->sector, req->hard_nr_sectors,
req->current_nr_sectors, rq_data_dir(req));
"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
(unsigned long long)blk_rq_pos(req),
blk_rq_sectors(req), blk_rq_cur_sectors(req),
rq_data_dir(req));
ace->req = req;
ace->data_ptr = req->buffer;
ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
count = req->hard_nr_sectors;
count = blk_rq_sectors(req);
if (rq_data_dir(req)) {
/* Kick off write request */
dev_dbg(ace->dev, "write data\n");
@ -688,7 +695,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
ace->req->current_nr_sectors * 16,
blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yield(ace); /* need to poll CFBSY bit */
break;
@ -697,7 +704,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
dev_dbg(ace->dev,
"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
ace->fsm_task, ace->fsm_iter_num,
ace->req->current_nr_sectors * 16,
blk_rq_cur_sectors(ace->req) * 16,
ace->data_count, ace->in_irq);
ace_fsm_yieldirq(ace);
break;
@ -717,14 +724,13 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
if (__blk_end_request(ace->req, 0,
blk_rq_cur_bytes(ace->req))) {
/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
* ace->req->hard_nr_sectors,
* ace->req->current_nr_sectors);
if (__blk_end_request_cur(ace->req, 0)) {
/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
*/
ace->data_ptr = ace->req->buffer;
ace->data_count = ace->req->current_nr_sectors * 16;
ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
}
@ -978,7 +984,7 @@ static int __devinit ace_setup(struct ace_device *ace)
ace->queue = blk_init_queue(ace_request, &ace->lock);
if (ace->queue == NULL)
goto err_blk_initq;
blk_queue_hardsect_size(ace->queue, 512);
blk_queue_logical_block_size(ace->queue, 512);
/*
* Allocate and initialize GD structure


@ -70,15 +70,18 @@ static struct gendisk *z2ram_gendisk;
static void do_z2_request(struct request_queue *q)
{
struct request *req;
while ((req = elv_next_request(q)) != NULL) {
unsigned long start = req->sector << 9;
unsigned long len = req->current_nr_sectors << 9;
req = blk_fetch_request(q);
while (req) {
unsigned long start = blk_rq_pos(req) << 9;
unsigned long len = blk_rq_cur_bytes(req);
int err = 0;
if (start + len > z2ram_size) {
printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
req->sector, req->current_nr_sectors);
end_request(req, 0);
continue;
blk_rq_pos(req), blk_rq_cur_sectors(req));
err = -EIO;
goto done;
}
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
@ -93,7 +96,9 @@ static void do_z2_request(struct request_queue *q)
start += size;
len -= size;
}
end_request(req, 1);
done:
if (!__blk_end_request_cur(req, err))
req = blk_fetch_request(q);
}
}


@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;


@ -584,8 +584,8 @@ static void gdrom_readdisk_dma(struct work_struct *work)
list_for_each_safe(elem, next, &gdrom_deferred) {
req = list_entry(elem, struct request, queuelist);
spin_unlock(&gdrom_lock);
block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = req->nr_sectors/GD_TO_BLK;
block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
@ -632,39 +632,35 @@ static void gdrom_readdisk_dma(struct work_struct *work)
* before handling ending the request */
spin_lock(&gdrom_lock);
list_del_init(&req->queuelist);
__blk_end_request(req, err, blk_rq_bytes(req));
__blk_end_request_all(req, err);
}
spin_unlock(&gdrom_lock);
kfree(read_command);
}
static void gdrom_request_handler_dma(struct request *req)
{
/* dequeue, add to list of deferred work
* and then schedule workqueue */
blkdev_dequeue_request(req);
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
}
static void gdrom_request(struct request_queue *rq)
{
struct request *req;
while ((req = elv_next_request(rq)) != NULL) {
while ((req = blk_fetch_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
end_request(req, 0);
__blk_end_request_all(req, -EIO);
continue;
}
if (rq_data_dir(req) != READ) {
printk(KERN_NOTICE "GDROM: Read only device -");
printk(" write request ignored\n");
end_request(req, 0);
__blk_end_request_all(req, -EIO);
continue;
}
if (req->nr_sectors)
gdrom_request_handler_dma(req);
else
end_request(req, 0);
/*
* Add to list of deferred work and then schedule
* workqueue.
*/
list_add_tail(&req->queuelist, &gdrom_deferred);
schedule_work(&work);
}
}
@ -743,7 +739,7 @@ static void __devinit probe_gdrom_setupdisk(void)
static int __devinit probe_gdrom_setupqueue(void)
{
blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR);
/* using DMA so memory will need to be contiguous */
blk_queue_max_hw_segments(gd.gdrom_rq, 1);
/* set a large max size to get most from DMA */


@ -282,7 +282,7 @@ static int send_request(struct request *req)
viopath_targetinst(viopath_hostLp),
(u64)req, VIOVERSION << 16,
((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
(u64)req->sector * 512, len, 0);
(u64)blk_rq_pos(req) * 512, len, 0);
if (hvrc != HvLpEvent_Rc_Good) {
printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
return -1;
@ -291,36 +291,19 @@ static int send_request(struct request *req)
return 0;
}
static void viocd_end_request(struct request *req, int error)
{
int nsectors = req->hard_nr_sectors;
/*
* Make sure it's fully ended, and ensure that we process
* at least one sector.
*/
if (blk_pc_request(req))
nsectors = (req->data_len + 511) >> 9;
if (!nsectors)
nsectors = 1;
if (__blk_end_request(req, error, nsectors << 9))
BUG();
}
static int rwreq;
static void do_viocd_request(struct request_queue *q)
{
struct request *req;
while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (!blk_fs_request(req))
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
printk(VIOCD_KERN_WARNING
"unable to send message to OS/400!");
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
} else
rwreq++;
}
@ -486,8 +469,8 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
case viocdopen:
if (event->xRc == 0) {
di = &viocd_diskinfo[bevent->disk];
blk_queue_hardsect_size(di->viocd_disk->queue,
bevent->block_size);
blk_queue_logical_block_size(di->viocd_disk->queue,
bevent->block_size);
set_capacity(di->viocd_disk,
bevent->media_size *
bevent->block_size / 512);
@ -531,9 +514,9 @@ return_complete:
"with rc %d:0x%04X: %s\n",
req, event->xRc,
bevent->sub_result, err->msg);
viocd_end_request(req, -EIO);
__blk_end_request_all(req, -EIO);
} else
viocd_end_request(req, 0);
__blk_end_request_all(req, 0);
/* restart handling of incoming requests */
spin_unlock_irqrestore(&viocd_reqlock, flags);


@ -71,7 +71,7 @@ static int raw_open(struct inode *inode, struct file *filp)
err = bd_claim(bdev, raw_open);
if (err)
goto out1;
err = set_blocksize(bdev, bdev_hardsect_size(bdev));
err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out2;
filp->f_flags |= O_DIRECT;


@ -246,6 +246,7 @@ EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
*/
void ide_retry_pc(ide_drive_t *drive)
{
struct request *failed_rq = drive->hwif->rq;
struct request *sense_rq = &drive->sense_rq;
struct ide_atapi_pc *pc = &drive->request_sense_pc;
@ -255,13 +256,22 @@ void ide_retry_pc(ide_drive_t *drive)
ide_init_pc(pc);
memcpy(pc->c, sense_rq->cmd, 12);
pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */
pc->req_xfer = sense_rq->data_len;
pc->req_xfer = blk_rq_bytes(sense_rq);
if (drive->media == ide_tape)
set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags);
if (ide_queue_sense_rq(drive, pc))
ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq));
/*
* Push back the failed request and put request sense on top
* of it. The failed command will be retried after sense data
* is acquired.
*/
blk_requeue_request(failed_rq->q, failed_rq);
drive->hwif->rq = NULL;
if (ide_queue_sense_rq(drive, pc)) {
blk_start_request(failed_rq);
ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
}
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
@ -303,7 +313,7 @@ int ide_cd_get_xferlen(struct request *rq)
return 32768;
else if (blk_sense_request(rq) || blk_pc_request(rq) ||
rq->cmd_type == REQ_TYPE_ATA_PC)
return rq->data_len;
return blk_rq_bytes(rq);
else
return 0;
}
@ -367,7 +377,6 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
/* No more interrupts */
if ((stat & ATA_DRQ) == 0) {
int uptodate, error;
unsigned int done;
debug_log("Packet command completed, %d bytes transferred\n",
pc->xferred);
@ -431,7 +440,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
error = uptodate ? 0 : -EIO;
}
ide_complete_rq(drive, error, done);
ide_complete_rq(drive, error, blk_rq_bytes(rq));
return ide_stopped;
}


@ -182,7 +182,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
(sense->information[2] << 8) |
(sense->information[3]);
if (drive->queue->hardsect_size == 2048)
if (queue_logical_block_size(drive->queue) == 2048)
/* device sector size is 2K */
sector <<= 2;
@ -404,15 +404,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat)
end_request:
if (stat & ATA_ERR) {
struct request_queue *q = drive->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blkdev_dequeue_request(rq);
spin_unlock_irqrestore(q->queue_lock, flags);
hwif->rq = NULL;
return ide_queue_sense_rq(drive, rq) ? 2 : 1;
} else
return 2;
@ -518,7 +510,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
error = blk_execute_rq(drive->queue, info->disk, rq, 0);
if (buffer)
*bufflen = rq->data_len;
*bufflen = rq->resid_len;
flags = rq->cmd_flags;
blk_put_request(rq);
@ -576,7 +568,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
struct request *rq = hwif->rq;
ide_expiry_t *expiry = NULL;
int dma_error = 0, dma, thislen, uptodate = 0;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors;
int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0;
int sense = blk_sense_request(rq);
unsigned int timeout;
u16 len;
@ -706,13 +698,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
out_end:
if (blk_pc_request(rq) && rc == 0) {
unsigned int dlen = rq->data_len;
rq->data_len = 0;
if (blk_end_request(rq, 0, dlen))
BUG();
rq->resid_len = 0;
blk_end_request_all(rq, 0);
hwif->rq = NULL;
} else {
if (sense && uptodate)
@ -730,21 +717,13 @@ out_end:
ide_cd_error_cmd(drive, cmd);
/* make sure it's fully ended */
if (blk_pc_request(rq))
nsectors = (rq->data_len + 511) >> 9;
else
nsectors = rq->hard_nr_sectors;
if (nsectors == 0)
nsectors = 1;
if (blk_fs_request(rq) == 0) {
rq->data_len -= (cmd->nbytes - cmd->nleft);
rq->resid_len -= cmd->nbytes - cmd->nleft;
if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE))
rq->data_len += cmd->last_xfer_len;
rq->resid_len += cmd->last_xfer_len;
}
ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9);
ide_complete_rq(drive, uptodate ? 0 : -EIO, blk_rq_bytes(rq));
if (sense && rc == 2)
ide_error(drive, "request sense failure", stat);
@ -758,7 +737,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
struct request_queue *q = drive->queue;
int write = rq_data_dir(rq) == WRITE;
unsigned short sectors_per_frame =
queue_hardsect_size(q) >> SECTOR_BITS;
queue_logical_block_size(q) >> SECTOR_BITS;
ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, "
"secs_per_frame: %u",
@ -777,8 +756,8 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
}
/* fs requests *must* be hardware frame aligned */
if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
(rq->sector & (sectors_per_frame - 1)))
if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) ||
(blk_rq_pos(rq) & (sectors_per_frame - 1)))
return ide_stopped;
/* use DMA, if possible */
@ -821,7 +800,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
*/
alignment = queue_dma_alignment(q) | q->dma_pad_mask;
if ((unsigned long)buf & alignment
|| rq->data_len & q->dma_pad_mask
|| blk_rq_bytes(rq) & q->dma_pad_mask
|| object_is_on_stack(buf))
drive->dma = 0;
}
@ -869,15 +848,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
cmd.rq = rq;
if (blk_fs_request(rq) || rq->data_len) {
ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? (rq->nr_sectors << 9)
: rq->data_len);
if (blk_fs_request(rq) || blk_rq_bytes(rq)) {
ide_init_sg_cmd(&cmd, blk_rq_bytes(rq));
ide_map_sg(drive, &cmd);
}
return ide_issue_pc(drive, &cmd);
out_end:
nsectors = rq->hard_nr_sectors;
nsectors = blk_rq_sectors(rq);
if (nsectors == 0)
nsectors = 1;
@ -1043,8 +1021,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
/* save a private copy of the TOC capacity for error handling */
drive->probed_capacity = toc->capacity * sectors_per_frame;
blk_queue_hardsect_size(drive->queue,
sectors_per_frame << SECTOR_BITS);
blk_queue_logical_block_size(drive->queue,
sectors_per_frame << SECTOR_BITS);
/* first read just the header, so we know how long the TOC is */
stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
@ -1360,9 +1338,9 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
/* standard prep_rq_fn that builds 10 byte cmds */
static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
{
int hard_sect = queue_hardsect_size(q);
long block = (long)rq->hard_sector / (hard_sect >> 9);
unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
int hard_sect = queue_logical_block_size(q);
long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
memset(rq->cmd, 0, BLK_MAX_CDB);
@ -1565,7 +1543,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
nslots = ide_cdrom_probe_capabilities(drive);
blk_queue_hardsect_size(q, CD_FRAMESIZE);
blk_queue_logical_block_size(q, CD_FRAMESIZE);
if (ide_cdrom_register(drive, nslots)) {
printk(KERN_ERR PFX "%s: %s failed to register device with the"
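
The ide-cd conversion above swaps the hardsect_size naming for logical_block_size: the frame size is published with blk_queue_logical_block_size() and read back with queue_logical_block_size(). A small sketch of that round trip; the function names are placeholders, and CD_FRAMESIZE (2048) comes from <linux/cdrom.h> as in the driver:

#include <linux/blkdev.h>
#include <linux/cdrom.h>        /* CD_FRAMESIZE == 2048 */

/* Sketch only: advertise the 2 KB frame size on the queue, then convert
 * a byte count into device blocks through the accessor instead of
 * touching the old q->hardsect_size field. */
static void cd_publish_block_size(struct request_queue *q)
{
        blk_queue_logical_block_size(q, CD_FRAMESIZE);
}

static unsigned int cd_bytes_to_frames(struct request_queue *q,
                                       unsigned int bytes)
{
        return bytes / queue_logical_block_size(q);
}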

View File

@ -82,7 +82,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
sector_t block)
{
ide_hwif_t *hwif = drive->hwif;
u16 nsectors = (u16)rq->nr_sectors;
u16 nsectors = (u16)blk_rq_sectors(rq);
u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
struct ide_cmd cmd;
@ -90,7 +90,7 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ide_startstop_t rc;
if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
if (block + rq->nr_sectors > 1ULL << 28)
if (block + blk_rq_sectors(rq) > 1ULL << 28)
dma = 0;
else
lba48 = 0;
@ -195,9 +195,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
(unsigned long long)block, rq->nr_sectors,
(unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
if (hwif->rw_disk)
@ -639,7 +639,7 @@ static void ide_disk_setup(ide_drive_t *drive)
}
printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
q->max_sectors / 2);
queue_max_sectors(q) / 2);
if (ata_id_is_ssd(id))
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);

View File

@ -103,7 +103,7 @@ ide_startstop_t ide_dma_intr(ide_drive_t *drive)
ide_finish_cmd(drive, cmd, stat);
else
ide_complete_rq(drive, 0,
cmd->rq->nr_sectors << 9);
blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
}
printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",

View File

@ -194,7 +194,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
{
struct ide_disk_obj *floppy = drive->driver_data;
int block = sector / floppy->bs_factor;
int blocks = rq->nr_sectors / floppy->bs_factor;
int blocks = blk_rq_sectors(rq) / floppy->bs_factor;
int cmd = rq_data_dir(rq);
ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks);
@ -220,14 +220,14 @@ static void idefloppy_blockpc_cmd(struct ide_disk_obj *floppy,
ide_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
pc->rq = rq;
if (rq->data_len) {
if (blk_rq_bytes(rq)) {
pc->flags |= PC_FLAG_DMA_OK;
if (rq_data_dir(rq) == WRITE)
pc->flags |= PC_FLAG_WRITING;
}
/* pio will be performed by ide_pio_bytes() which handles sg fine */
pc->buf = NULL;
pc->req_xfer = pc->buf_size = rq->data_len;
pc->req_xfer = pc->buf_size = blk_rq_bytes(rq);
}
static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
@ -259,8 +259,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
goto out_end;
}
if (blk_fs_request(rq)) {
if (((long)rq->sector % floppy->bs_factor) ||
(rq->nr_sectors % floppy->bs_factor)) {
if (((long)blk_rq_pos(rq) % floppy->bs_factor) ||
(blk_rq_sectors(rq) % floppy->bs_factor)) {
printk(KERN_ERR PFX "%s: unsupported r/w rq size\n",
drive->name);
goto out_end;

View File

@ -116,9 +116,9 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
unsigned int ide_rq_bytes(struct request *rq)
{
if (blk_pc_request(rq))
return rq->data_len;
return blk_rq_bytes(rq);
else
return rq->hard_cur_sectors << 9;
return blk_rq_cur_sectors(rq) << 9;
}
EXPORT_SYMBOL_GPL(ide_rq_bytes);
@ -133,7 +133,7 @@ int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
* and complete the whole request right now
*/
if (blk_noretry_request(rq) && error <= 0)
nr_bytes = rq->hard_nr_sectors << 9;
nr_bytes = blk_rq_sectors(rq) << 9;
rc = ide_end_rq(drive, rq, error, nr_bytes);
if (rc == 0)
@ -279,7 +279,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
ide_init_sg_cmd(cmd, rq->nr_sectors << 9);
ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
ide_map_sg(drive, cmd);
}
@ -387,7 +387,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
drv = *(struct ide_driver **)rq->rq_disk->private_data;
return drv->do_request(drive, rq, rq->sector);
return drv->do_request(drive, rq, blk_rq_pos(rq));
}
return do_special(drive);
kill_rq:
@ -487,10 +487,10 @@ void do_ide_request(struct request_queue *q)
if (!ide_lock_port(hwif)) {
ide_hwif_t *prev_port;
WARN_ON_ONCE(hwif->rq);
repeat:
prev_port = hwif->host->cur_port;
hwif->rq = NULL;
if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
time_after(drive->sleep, jiffies)) {
ide_unlock_port(hwif);
@ -519,7 +519,9 @@ repeat:
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
rq = elv_next_request(drive->queue);
if (!rq)
rq = blk_fetch_request(drive->queue);
spin_unlock_irq(q->queue_lock);
spin_lock_irq(&hwif->lock);
@ -531,7 +533,7 @@ repeat:
/*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
* blk_stop_queue() doesn't prevent the elv_next_request()
* blk_stop_queue() doesn't prevent the blk_fetch_request()
* above from returning whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
@ -555,8 +557,11 @@ repeat:
startstop = start_request(drive, rq);
spin_lock_irq(&hwif->lock);
if (startstop == ide_stopped)
if (startstop == ide_stopped) {
rq = hwif->rq;
hwif->rq = NULL;
goto repeat;
}
} else
goto plug_device;
out:
@ -572,18 +577,24 @@ plug_device:
plug_device_2:
spin_lock_irq(q->queue_lock);
if (rq)
blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
}
static void ide_plug_device(ide_drive_t *drive)
static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
if (rq)
blk_requeue_request(q, rq);
if (!elv_queue_empty(q))
blk_plug_device(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
@ -632,6 +643,7 @@ void ide_timer_expiry (unsigned long data)
unsigned long flags;
int wait = -1;
int plug_device = 0;
struct request *uninitialized_var(rq_in_flight);
spin_lock_irqsave(&hwif->lock, flags);
@ -693,6 +705,8 @@ void ide_timer_expiry (unsigned long data)
spin_lock_irq(&hwif->lock);
enable_irq(hwif->irq);
if (startstop == ide_stopped && hwif->polling == 0) {
rq_in_flight = hwif->rq;
hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@ -701,7 +715,7 @@ void ide_timer_expiry (unsigned long data)
if (plug_device) {
ide_unlock_host(hwif->host);
ide_plug_device(drive);
ide_requeue_and_plug(drive, rq_in_flight);
}
}
@ -787,6 +801,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
ide_startstop_t startstop;
irqreturn_t irq_ret = IRQ_NONE;
int plug_device = 0;
struct request *uninitialized_var(rq_in_flight);
if (host->host_flags & IDE_HFLAG_SERIALIZE) {
if (hwif != host->cur_port)
@ -866,6 +881,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
*/
if (startstop == ide_stopped && hwif->polling == 0) {
BUG_ON(hwif->handler);
rq_in_flight = hwif->rq;
hwif->rq = NULL;
ide_unlock_port(hwif);
plug_device = 1;
}
@ -875,7 +892,7 @@ out:
out_early:
if (plug_device) {
ide_unlock_host(hwif->host);
ide_plug_device(drive);
ide_requeue_and_plug(drive, rq_in_flight);
}
return irq_ret;
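
The do_ide_request() and ide_requeue_and_plug() changes above move from elv_next_request(), which left the request on the queue, to blk_peek_request()/blk_start_request() (or blk_fetch_request(), which combines the two), so a request taken off the queue has to be requeued explicitly with blk_requeue_request() when the port cannot service it. A generic request_fn loop in that style, as a sketch rather than the IDE code itself; the example_* helpers are stand-ins:

#include <linux/blkdev.h>

/* Hypothetical stand-ins so the sketch is self-contained. */
static bool example_hw_ready(struct request_queue *q) { return true; }
static int example_hw_submit(struct request *rq) { return 0; }

/*
 * Sketch of a request_fn in the 2.6.31 style: peek at the head of the
 * queue, dequeue explicitly with blk_start_request() once the hardware
 * can take the command.  Called with q->queue_lock held, as request_fn
 * always is, which is also what the __blk_end_* variant expects.
 */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                if (!example_hw_ready(q)) {
                        blk_plug_device(q);     /* try again later */
                        break;
                }
                blk_start_request(rq);          /* dequeue + start timeout */
                if (example_hw_submit(rq))
                        __blk_end_request_all(rq, -EIO);
        }
}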

View File

@ -96,7 +96,7 @@ static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
if (rq)
printk(KERN_CONT ", sector=%llu",
(unsigned long long)rq->sector);
(unsigned long long)blk_rq_pos(rq));
}
printk(KERN_CONT "\n");
}

View File

@ -380,7 +380,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc)
}
tape->first_frame += blocks;
rq->data_len -= blocks * tape->blk_size;
rq->resid_len -= blocks * tape->blk_size;
if (pc->error) {
uptodate = 0;
@ -586,7 +586,7 @@ static void ide_tape_create_rw_cmd(idetape_tape_t *tape,
struct ide_atapi_pc *pc, struct request *rq,
u8 opcode)
{
unsigned int length = rq->nr_sectors;
unsigned int length = blk_rq_sectors(rq);
ide_init_pc(pc);
put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
@ -617,8 +617,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
struct ide_cmd cmd;
u8 stat;
debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu\n",
(unsigned long long)rq->sector, rq->nr_sectors);
debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n",
(unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq));
if (!(blk_special_request(rq) || blk_sense_request(rq))) {
/* We do not support buffer cache originated requests. */
@ -892,7 +892,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
rq->cmd_type = REQ_TYPE_SPECIAL;
rq->cmd[13] = cmd;
rq->rq_disk = tape->disk;
rq->sector = tape->first_frame;
rq->__sector = tape->first_frame;
if (size) {
ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
@ -904,7 +904,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
blk_execute_rq(drive->queue, tape->disk, rq, 0);
/* calculate the number of transferred bytes and update buffer state */
size -= rq->data_len;
size -= rq->resid_len;
tape->cur = tape->buf;
if (cmd == REQ_IDETAPE_READ)
tape->valid = size;
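
idetape_queue_rw_tail() above now reads the residual from rq->resid_len, which blk_execute_rq() leaves as the number of bytes that were not transferred, instead of reusing rq->data_len for that purpose. A hedged sketch of the calculation (names are placeholders):

#include <linux/blkdev.h>

/*
 * Sketch: run a prepared request synchronously and work out how much
 * data actually moved, using the new resid_len field.
 */
static int example_sync_xfer(struct request_queue *q, struct gendisk *disk,
                             struct request *rq, unsigned int len)
{
        int ret = blk_execute_rq(q, disk, rq, 0);
        unsigned int done = len - rq->resid_len;

        pr_debug("transferred %u of %u bytes\n", done, len);
        return ret;
}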

View File

@ -385,7 +385,7 @@ out_end:
if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
ide_finish_cmd(drive, cmd, stat);
else
ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9);
return ide_stopped;
out_err:
ide_error_cmd(drive, cmd);

View File

@ -177,7 +177,7 @@ static void pdc202xx_dma_start(ide_drive_t *drive)
u8 clock = inb(high_16 + 0x11);
outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11);
word_count = (rq->nr_sectors << 8);
word_count = (blk_rq_sectors(rq) << 8);
word_count = (rq_data_dir(rq) == READ) ?
word_count | 0x05000000 :
word_count | 0x06000000;

View File

@ -112,7 +112,7 @@ static void tc86c001_dma_start(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
unsigned long sc_base = hwif->config_data;
unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04);
unsigned long nsectors = hwif->rq->nr_sectors;
unsigned long nsectors = blk_rq_sectors(hwif->rq);
/*
* We have to manually load the sector count and size into

View File

@ -307,7 +307,7 @@ static int tx4939ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ?
TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1);
tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt);
tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt);
return 0;
}

View File

@ -232,7 +232,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset,
target = rdev->sb_start + offset + index * (PAGE_SIZE/512);
if (sync_page_io(rdev->bdev, target,
roundup(size, bdev_hardsect_size(rdev->bdev)),
roundup(size, bdev_logical_block_size(rdev->bdev)),
page, READ)) {
page->index = index;
attach_page_buffers(page, NULL); /* so that free_buffer will
@ -287,7 +287,7 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
int size = PAGE_SIZE;
if (page->index == bitmap->file_pages-1)
size = roundup(bitmap->last_page_size,
bdev_hardsect_size(rdev->bdev));
bdev_logical_block_size(rdev->bdev));
/* Just make sure we aren't corrupting data or
* metadata
*/
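
read_sb_page() and write_sb_page() above now round their I/O sizes with bdev_logical_block_size() rather than bdev_hardsect_size(). A one-line sketch of that rounding, with an illustrative function name:

#include <linux/kernel.h>       /* roundup() */
#include <linux/blkdev.h>

/* Sketch: round a bitmap/superblock I/O size up to the device's logical
 * block size so partial-block writes are never issued. */
static unsigned int example_io_size(struct block_device *bdev,
                                    unsigned int size)
{
        return roundup(size, bdev_logical_block_size(bdev));
}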

View File

@ -178,7 +178,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
/* Validate the chunk size against the device block size */
if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) {
if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) {
*error = "Chunk size is not a multiple of device blocksize";
return -EINVAL;
}

View File

@ -413,7 +413,8 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
* Buffer holds both header and bitset.
*/
buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
bitset_size, ti->limits.hardsect_size);
bitset_size,
ti->limits.logical_block_size);
if (buf_size > dev->bdev->bd_inode->i_size) {
DMWARN("log device %s too small: need %llu bytes",

View File

@ -282,7 +282,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
*/
if (!ps->store->chunk_size) {
ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
bdev_hardsect_size(ps->store->cow->bdev) >> 9);
bdev_logical_block_size(ps->store->cow->bdev) >> 9);
ps->store->chunk_mask = ps->store->chunk_size - 1;
ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
chunk_size_supplied = 0;

View File

@ -108,7 +108,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
lhs->max_hw_segments =
min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments);
lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size);
lhs->logical_block_size = max(lhs->logical_block_size,
rhs->logical_block_size);
lhs->max_segment_size =
min_not_zero(lhs->max_segment_size, rhs->max_segment_size);
@ -509,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
* combine_restrictions_low()
*/
rs->max_sectors =
min_not_zero(rs->max_sectors, q->max_sectors);
min_not_zero(rs->max_sectors, queue_max_sectors(q));
/*
* Check if merge fn is supported.
@ -524,24 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
q->max_phys_segments);
queue_max_phys_segments(q));
rs->max_hw_segments =
min_not_zero(rs->max_hw_segments, q->max_hw_segments);
min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size);
rs->logical_block_size = max(rs->logical_block_size,
queue_logical_block_size(q));
rs->max_segment_size =
min_not_zero(rs->max_segment_size, q->max_segment_size);
min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
rs->max_hw_sectors =
min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
q->seg_boundary_mask);
queue_segment_boundary(q));
rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
@ -683,8 +685,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
rs->max_phys_segments = MAX_PHYS_SEGMENTS;
if (!rs->max_hw_segments)
rs->max_hw_segments = MAX_HW_SEGMENTS;
if (!rs->hardsect_size)
rs->hardsect_size = 1 << SECTOR_SHIFT;
if (!rs->logical_block_size)
rs->logical_block_size = 1 << SECTOR_SHIFT;
if (!rs->max_segment_size)
rs->max_segment_size = MAX_SEGMENT_SIZE;
if (!rs->seg_boundary_mask)
@ -912,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
q->max_phys_segments = t->limits.max_phys_segments;
q->max_hw_segments = t->limits.max_hw_segments;
q->hardsect_size = t->limits.hardsect_size;
q->max_segment_size = t->limits.max_segment_size;
q->max_hw_sectors = t->limits.max_hw_sectors;
q->seg_boundary_mask = t->limits.seg_boundary_mask;
q->bounce_pfn = t->limits.bounce_pfn;
blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
blk_queue_logical_block_size(q, t->limits.logical_block_size);
blk_queue_max_segment_size(q, t->limits.max_segment_size);
blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
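
dm_set_device_limits() above reads the underlying queue through the queue_*() accessors, and dm_table_set_restrictions() applies the combined limits with the blk_queue_*() setters instead of assigning to request_queue fields. A trimmed-down sketch of stacking a single limit both ways; io_restrictions and min_not_zero() are the DM helpers shown above, so the combination is spelled out by hand here:

#include <linux/blkdev.h>

/*
 * Sketch: take the stricter of the current limit and the lower device's
 * limit (ignoring zero, which means "unset"), then push the result into
 * the stacked queue with the matching setter.
 */
static void example_stack_max_sectors(struct request_queue *upper,
                                      struct request_queue *lower,
                                      unsigned int current_limit)
{
        unsigned int lower_max = queue_max_sectors(lower);

        if (lower_max && (!current_limit || lower_max < current_limit))
                current_limit = lower_max;

        blk_queue_max_sectors(upper, current_limit);
}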

View File

@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;

View File

@ -1202,7 +1202,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
if (rdev->sb_size & bmask)
rdev->sb_size = (rdev->sb_size | bmask) + 1;

View File

@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* merge_bvec_fn will be involved in multipath.)
*/
if (q->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))

View File

@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
*/
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))

View File

@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;

View File

@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
mddev->queue->max_sectors = (PAGE_SIZE>>9);
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
mddev->queue->max_sectors > (PAGE_SIZE>>9))
mddev->queue->max_sectors = (PAGE_SIZE>>9);
queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}

View File

@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
if ((bi->bi_size>>9) > q->max_sectors)
if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
if (bi->bi_phys_segments > q->max_phys_segments)
if (bi->bi_phys_segments > queue_max_phys_segments(q))
return 0;
if (q->merge_bvec_fn)

View File

@ -672,15 +672,14 @@ try_again:
msb->req_sg);
if (!msb->seg_count) {
chunk = __blk_end_request(msb->block_req, -ENOMEM,
blk_rq_cur_bytes(msb->block_req));
chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
continue;
}
t_sec = msb->block_req->sector << 9;
t_sec = blk_rq_pos(msb->block_req) << 9;
sector_div(t_sec, msb->page_size);
count = msb->block_req->nr_sectors << 9;
count = blk_rq_bytes(msb->block_req);
count /= msb->page_size;
param.system = msb->system;
@ -705,8 +704,8 @@ try_again:
return 0;
}
dev_dbg(&card->dev, "elv_next\n");
msb->block_req = elv_next_request(msb->queue);
dev_dbg(&card->dev, "blk_fetch\n");
msb->block_req = blk_fetch_request(msb->queue);
if (!msb->block_req) {
dev_dbg(&card->dev, "issue end\n");
return -EAGAIN;
@ -745,7 +744,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len *= msb->page_size;
}
} else
t_len = msb->block_req->nr_sectors << 9;
t_len = blk_rq_bytes(msb->block_req);
dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
@ -825,8 +824,8 @@ static void mspro_block_submit_req(struct request_queue *q)
return;
if (msb->eject) {
while ((req = elv_next_request(q)) != NULL)
__blk_end_request(req, -ENODEV, blk_rq_bytes(req));
while ((req = blk_fetch_request(q)) != NULL)
__blk_end_request_all(req, -ENODEV);
return;
}
@ -1243,7 +1242,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
sprintf(msb->disk->disk_name, "mspblk%d", disk_id);
blk_queue_hardsect_size(msb->queue, msb->page_size);
blk_queue_logical_block_size(msb->queue, msb->page_size);
capacity = be16_to_cpu(sys_info->user_block_count);
capacity *= be16_to_cpu(sys_info->block_size);
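
The memstick changes above use two completion granularities: __blk_end_request_cur() to fail only the chunk currently being served (so the rest of the request can still be retried), and blk_fetch_request() plus __blk_end_request_all() to drain everything once the media is gone. A sketch of both shapes; the function names are illustrative and q->queue_lock is assumed held, as the __blk_end_* variants require:

#include <linux/blkdev.h>

/* Sketch: end just the current segment of rq (blk_rq_cur_bytes() worth). */
static void example_fail_current_chunk(struct request *rq)
{
        __blk_end_request_cur(rq, -ENOMEM);
}

/* Sketch: fail every request still queued, e.g. after an eject. */
static void example_drain_dead_queue(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL)
                __blk_end_request_all(rq, -ENODEV);
}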

View File

@ -1277,8 +1277,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* do we need to support multiple segments? */
if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
ioc->name, __func__, req->bio->bi_vcnt, req->data_len,
rsp->bio->bi_vcnt, rsp->data_len);
ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
return -EINVAL;
}
@ -1295,7 +1295,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smpreq = (SmpPassthroughRequest_t *)mf;
memset(smpreq, 0, sizeof(*smpreq));
smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4);
smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
if (rphy)
@ -1321,10 +1321,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
MPI_SGE_FLAGS_END_OF_BUFFER |
MPI_SGE_FLAGS_DIRECTION |
mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
flagsLength |= (req->data_len - 4);
flagsLength |= (blk_rq_bytes(req) - 4);
dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
req->data_len, PCI_DMA_BIDIRECTIONAL);
blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_out)
goto put_mf;
mpt_add_sge(psge, flagsLength, dma_addr_out);
@ -1332,9 +1332,9 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
/* response */
flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
flagsLength |= rsp->data_len + 4;
flagsLength |= blk_rq_bytes(rsp) + 4;
dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
rsp->data_len, PCI_DMA_BIDIRECTIONAL);
blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
if (!dma_addr_in)
goto unmap;
mpt_add_sge(psge, flagsLength, dma_addr_in);
@ -1357,8 +1357,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
memcpy(req->sense, smprep, sizeof(*smprep));
req->sense_len = sizeof(*smprep);
req->data_len = 0;
rsp->data_len -= smprep->ResponseDataLength;
req->resid_len = 0;
rsp->resid_len -= smprep->ResponseDataLength;
} else {
printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
ioc->name, __func__);
@ -1366,10 +1366,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
}
unmap:
if (dma_addr_out)
pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len,
pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
PCI_DMA_BIDIRECTIONAL);
if (dma_addr_in)
pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len,
pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
PCI_DMA_BIDIRECTIONAL);
put_mf:
if (mf)

View File

@ -426,15 +426,9 @@ static void i2o_block_end_request(struct request *req, int error,
struct request_queue *q = req->q;
unsigned long flags;
if (blk_end_request(req, error, nr_bytes)) {
int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
if (blk_pc_request(req))
leftover = req->data_len;
if (blk_end_request(req, error, nr_bytes))
if (error)
blk_end_request(req, -EIO, leftover);
}
blk_end_request_all(req, -EIO);
spin_lock_irqsave(q->queue_lock, flags);
@ -761,7 +755,7 @@ static int i2o_block_transfer(struct request *req)
break;
case CACHE_SMARTFETCH:
if (req->nr_sectors > 16)
if (blk_rq_sectors(req) > 16)
ctl_flags = 0x201F0008;
else
ctl_flags = 0x001F0000;
@ -781,13 +775,13 @@ static int i2o_block_transfer(struct request *req)
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
if (req->nr_sectors > 16)
if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
if (req->nr_sectors > 16)
if (blk_rq_sectors(req) > 16)
ctl_flags = 0x001F0004;
else
ctl_flags = 0x001F0010;
@ -800,8 +794,9 @@ static int i2o_block_transfer(struct request *req)
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
u16 hwsec;
hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
@ -827,22 +822,22 @@ static int i2o_block_transfer(struct request *req)
*mptr++ = cpu_to_le32(scsi_flags);
*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
*((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
memcpy(mptr, cmd, 10);
mptr += 4;
*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
*mptr++ = cpu_to_le32(blk_rq_bytes(req));
} else
#endif
{
msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
*mptr++ = cpu_to_le32(ctl_flags);
*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
*mptr++ = cpu_to_le32(blk_rq_bytes(req));
*mptr++ =
cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
*mptr++ =
cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@ -883,7 +878,7 @@ static void i2o_block_request_fn(struct request_queue *q)
struct request *req;
while (!blk_queue_plugged(q)) {
req = elv_next_request(q);
req = blk_peek_request(q);
if (!req)
break;
@ -896,7 +891,7 @@ static void i2o_block_request_fn(struct request_queue *q)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
blkdev_dequeue_request(req);
blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
@ -922,8 +917,10 @@ static void i2o_block_request_fn(struct request_queue *q)
blk_stop_queue(q);
break;
}
} else
end_request(req, 0);
} else {
blk_start_request(req);
__blk_end_request_all(req, -EIO);
}
}
};
@ -1082,7 +1079,7 @@ static int i2o_block_probe(struct device *dev)
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
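
i2o_block_end_request() above keeps the partial completion but, when blk_end_request() reports that bytes remain and an error occurred, finishes the remainder with blk_end_request_all() instead of computing the leftover by hand. A sketch of that shape (not the driver function itself); blk_end_request() takes the queue lock internally and returns true while the request is not yet fully completed:

#include <linux/blkdev.h>

/* Sketch: complete nr_bytes of rq; on error, end whatever is left. */
static void example_end_request(struct request *rq, int error,
                                unsigned int nr_bytes)
{
        if (blk_end_request(rq, error, nr_bytes) && error)
                blk_end_request_all(rq, -EIO);
}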

View File

@ -243,7 +243,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
brq.cmd.arg = req->sector;
brq.cmd.arg = blk_rq_pos(req);
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
@ -251,7 +251,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
brq.data.blocks = req->nr_sectors;
brq.data.blocks = blk_rq_sectors(req);
/*
* The block layer doesn't support all sector count
@ -301,7 +301,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
* Adjust the sg list so it is the same size as the
* request.
*/
if (brq.data.blocks != req->nr_sectors) {
if (brq.data.blocks != blk_rq_sectors(req)) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
@ -352,8 +352,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
printk(KERN_ERR "%s: error %d transferring data,"
" sector %u, nr %u, card status %#x\n",
req->rq_disk->disk_name, brq.data.error,
(unsigned)req->sector,
(unsigned)req->nr_sectors, status);
(unsigned)blk_rq_pos(req),
(unsigned)blk_rq_sectors(req), status);
}
if (brq.stop.error) {
@ -521,7 +521,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
sprintf(md->disk->disk_name, "mmcblk%d", devidx);
blk_queue_hardsect_size(md->queue.queue, 512);
blk_queue_logical_block_size(md->queue.queue, 512);
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*

View File

@ -55,7 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
req = elv_next_request(q);
req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
@ -88,16 +88,11 @@ static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
int ret;
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue\n");
while ((req = elv_next_request(q)) != NULL) {
do {
ret = __blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));
} while (ret);
}
while ((req = blk_fetch_request(q)) != NULL)
__blk_end_request_all(req, -EIO);
return;
}

View File

@ -47,40 +47,41 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
unsigned long block, nsect;
char *buf;
block = req->sector << 9 >> tr->blkshift;
nsect = req->current_nr_sectors << 9 >> tr->blkshift;
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = req->buffer;
if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
req->cmd[0] == REQ_LB_OP_DISCARD)
return !tr->discard(dev, block, nsect);
return tr->discard(dev, block, nsect);
if (!blk_fs_request(req))
return 0;
return -EIO;
if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
return 0;
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
switch(rq_data_dir(req)) {
case READ:
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->readsect(dev, block, buf))
return 0;
return 1;
return -EIO;
return 0;
case WRITE:
if (!tr->writesect)
return 0;
return -EIO;
for (; nsect > 0; nsect--, block++, buf += tr->blksize)
if (tr->writesect(dev, block, buf))
return 0;
return 1;
return -EIO;
return 0;
default:
printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
return 0;
return -EIO;
}
}
@ -88,19 +89,18 @@ static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
struct request *req = NULL;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
struct request *req;
struct mtd_blktrans_dev *dev;
int res = 0;
int res;
req = elv_next_request(rq);
if (!req) {
if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
@ -119,8 +119,13 @@ static int mtd_blktrans_thread(void *arg)
spin_lock_irq(rq->queue_lock);
end_request(req, res);
if (!__blk_end_request_cur(req, res))
req = NULL;
}
if (req)
__blk_end_request_all(req, -EIO);
spin_unlock_irq(rq->queue_lock);
return 0;
@ -373,7 +378,7 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
}
tr->blkcore_priv->rq->queuedata = tr;
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
blk_queue_set_discard(tr->blkcore_priv->rq,
blktrans_discard_request);
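
The mtd_blktrans_thread() rework above keeps the fetched request across loop iterations and ends it one current-segment at a time with __blk_end_request_cur(), only fetching a new request once that call reports the old one fully completed. A compressed sketch of the loop; the segment handler is a stand-in for do_blktrans_request(), and the queue lock is assumed held throughout, as the __blk_end_* call requires:

#include <linux/blkdev.h>

/* Hypothetical per-segment handler: returns 0 on success, -EIO on error. */
static int example_handle_segment(struct request *rq) { return 0; }

static void example_service_queue(struct request_queue *q)
{
        struct request *rq = NULL;

        while (rq || (rq = blk_fetch_request(q)) != NULL) {
                int res = example_handle_segment(rq);

                if (!__blk_end_request_cur(rq, res))
                        rq = NULL;              /* request fully ended */
        }
}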

View File

@ -603,7 +603,7 @@ static void dasd_profile_end(struct dasd_block *block,
if (dasd_profile_level != DASD_PROFILE_ON)
return;
sectors = req->nr_sectors;
sectors = blk_rq_sectors(req);
if (!cqr->buildclk || !cqr->startclk ||
!cqr->stopclk || !cqr->endclk ||
!sectors)
@ -1613,15 +1613,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
del_timer(&block->timer);
}
/*
* posts the buffer_cache about a finalized request
*/
static inline void dasd_end_request(struct request *req, int error)
{
if (__blk_end_request(req, error, blk_rq_bytes(req)))
BUG();
}
/*
* Process finished error recovery ccw.
*/
@ -1665,18 +1656,14 @@ static void __dasd_process_request_queue(struct dasd_block *block)
if (basedev->state < DASD_STATE_READY)
return;
/* Now we try to fetch requests from the request queue */
while (!blk_queue_plugged(queue) &&
elv_next_request(queue)) {
req = elv_next_request(queue);
while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p",
req);
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
blk_start_request(req);
__blk_end_request_all(req, -EIO);
continue;
}
cqr = basedev->discipline->build_cp(basedev, block, req);
@ -1704,8 +1691,8 @@ static void __dasd_process_request_queue(struct dasd_block *block)
"CCW creation failed (rc=%ld) "
"on request %p",
PTR_ERR(cqr), req);
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
blk_start_request(req);
__blk_end_request_all(req, -EIO);
continue;
}
/*
@ -1714,7 +1701,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
*/
cqr->callback_data = (void *) req;
cqr->status = DASD_CQR_FILLED;
blkdev_dequeue_request(req);
blk_start_request(req);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
dasd_profile_start(block, cqr, req);
}
@ -1731,7 +1718,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
__blk_end_request_all(req, error);
}
/*
@ -2003,7 +1990,7 @@ static void dasd_setup_queue(struct dasd_block *block)
{
int max;
blk_queue_hardsect_size(block->request_queue, block->bp_block);
blk_queue_logical_block_size(block->request_queue, block->bp_block);
max = block->base->discipline->max_blocks << block->s2b_shift;
blk_queue_max_sectors(block->request_queue, max);
blk_queue_max_phys_segments(block->request_queue, -1L);
@ -2038,10 +2025,8 @@ static void dasd_flush_request_queue(struct dasd_block *block)
return;
spin_lock_irq(&block->request_queue_lock);
while ((req = elv_next_request(block->request_queue))) {
blkdev_dequeue_request(req);
dasd_end_request(req, -EIO);
}
while ((req = blk_fetch_request(block->request_queue)))
__blk_end_request_all(req, -EIO);
spin_unlock_irq(&block->request_queue_lock);
}

View File

@ -505,8 +505,9 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = req->sector >> block->s2b_shift;
last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
rq_for_each_segment(bv, req, iter) {

View File

@ -2354,10 +2354,10 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
blksize = block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
/* Calculate record id of first and last block. */
first_rec = first_trk = req->sector >> block->s2b_shift;
first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
first_offs = sector_div(first_trk, blk_per_trk);
last_rec = last_trk =
(req->sector + req->nr_sectors - 1) >> block->s2b_shift;
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
last_offs = sector_div(last_trk, blk_per_trk);
cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
@ -2420,7 +2420,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
private = (struct dasd_eckd_private *) cqr->block->base->private;
blksize = cqr->block->bp_block;
blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
recid = req->sector >> cqr->block->s2b_shift;
recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
ccw = cqr->cpaddr;
/* Skip over define extent & locate record. */
ccw++;

View File

@ -270,8 +270,9 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
return ERR_PTR(-EINVAL);
blksize = block->bp_block;
/* Calculate record id of first and last block. */
first_rec = req->sector >> block->s2b_shift;
last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
first_rec = blk_rq_pos(req) >> block->s2b_shift;
last_rec =
(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
/* Check struct bio and count the number of blocks for the request. */
count = 0;
cidaw = 0;
@ -309,7 +310,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
ccw = cqr->cpaddr;
/* First ccw is define extent. */
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, req->sector, req->nr_sectors);
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
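
The dasd_diag, dasd_eckd and dasd_fba conversions above all derive the first and last record of a request from blk_rq_pos() and blk_rq_sectors() instead of req->sector and req->nr_sectors. A sketch of that arithmetic; s2b_shift is the driver's shift from 512-byte sectors to device blocks, and the function name is illustrative:

#include <linux/blkdev.h>

/* Sketch: compute the inclusive device-block range covered by req. */
static void example_record_range(struct request *req, unsigned int s2b_shift,
                                 sector_t *first_rec, sector_t *last_rec)
{
        *first_rec = blk_rq_pos(req) >> s2b_shift;
        *last_rec = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> s2b_shift;
}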

View File

@ -602,7 +602,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096);
seg_byte_size = (dev_info->end - dev_info->start + 1);
set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors

View File

@ -343,7 +343,7 @@ static int __init xpram_setup_blkdev(void)
goto out;
}
blk_queue_make_request(xpram_queues[i], xpram_make_request);
blk_queue_hardsect_size(xpram_queues[i], 4096);
blk_queue_logical_block_size(xpram_queues[i], 4096);
}
/*

Some files were not shown because too many files have changed in this diff.