
block: make core bits checkpatch compliant

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Jens Axboe 2008-01-31 13:03:55 +01:00
parent 22b132102f
commit 6728cb0e63
8 changed files with 116 additions and 141 deletions
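The changes are the mechanical cleanups checkpatch.pl reports: adding explicit KERN_* levels to printk(), splitting assignments out of if-conditions, fixing "type * ptr" spacing, wrapping lines longer than 80 columns, re-indenting switch/case labels, and dropping the blank line between a function body and its EXPORT_SYMBOL(). As a rough sketch of the before/after pattern (a hypothetical helper, not code from this patch; init_queue() is assumed purely for illustration):

	/* Before: no printk level, assignment inside the if, "* q" spacing. */
	static int setup_queue(struct request_queue * q)
	{
		int rc;

		if ((rc = init_queue(q))) {
			printk("setup_queue: init failed\n");
			return rc;
		}
		return 0;
	}

	/* After: explicit KERN_ERR, assignment split out of the test,
	 * pointer spacing fixed -- the same transformations applied
	 * throughout the hunks below. */
	static int setup_queue(struct request_queue *q)
	{
		int rc;

		rc = init_queue(q);
		if (rc) {
			printk(KERN_ERR "%s: init failed\n", __FUNCTION__);
			return rc;
		}
		return 0;
	}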

block/blk-barrier.c

@@ -26,7 +26,8 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
 	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
 	    prepare_flush_fn == NULL) {
-		printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n");
+		printk(KERN_ERR "%s: prepare_flush_fn required\n",
+				__FUNCTION__);
 		return -EINVAL;
 	}
@@ -47,7 +48,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_ordered);
 
 /*
@@ -315,5 +315,4 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
 	bio_put(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blkdev_issue_flush);

block/blk-core.c

@@ -3,7 +3,8 @@
  * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
  * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
- * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
+ *	- July2000
  * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
  */
@@ -42,7 +43,7 @@ struct kmem_cache *request_cachep;
 /*
  * For queue allocation
  */
-struct kmem_cache *blk_requestq_cachep = NULL;
+struct kmem_cache *blk_requestq_cachep;
 
 /*
  * Controlling structure to kblockd
@@ -137,7 +138,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		error = -EIO;
 
 	if (unlikely(nbytes > bio->bi_size)) {
-		printk("%s: want %u bytes done, only %u left\n",
+		printk(KERN_ERR "%s: want %u bytes done, %u left\n",
 		       __FUNCTION__, nbytes, bio->bi_size);
 		nbytes = bio->bi_size;
 	}
@@ -161,23 +162,26 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+	printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
 		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
 		rq->cmd_flags);
 
-	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
-						rq->nr_sectors,
-						rq->current_nr_sectors);
-	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+	printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n",
+						(unsigned long long)rq->sector,
+						rq->nr_sectors,
+						rq->current_nr_sectors);
+	printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n",
+						rq->bio, rq->biotail,
+						rq->buffer, rq->data,
+						rq->data_len);
 
 	if (blk_pc_request(rq)) {
-		printk("cdb: ");
+		printk(KERN_INFO " cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
 		printk("\n");
 	}
 }
-
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
 /*
@@ -204,7 +208,6 @@ void blk_plug_device(struct request_queue *q)
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
 }
-
 EXPORT_SYMBOL(blk_plug_device);
 
 /*
@@ -221,7 +224,6 @@ int blk_remove_plug(struct request_queue *q)
 	del_timer(&q->unplug_timer);
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_remove_plug);
 
 /*
@@ -328,7 +330,6 @@ void blk_start_queue(struct request_queue *q)
 		kblockd_schedule_work(&q->unplug_work);
 	}
 }
-
 EXPORT_SYMBOL(blk_start_queue);
 
 /**
@@ -408,7 +409,7 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-void blk_cleanup_queue(struct request_queue * q)
+void blk_cleanup_queue(struct request_queue *q)
 {
 	mutex_lock(&q->sysfs_lock);
 	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
@@ -419,7 +420,6 @@ void blk_cleanup_queue(struct request_queue * q)
 	blk_put_queue(q);
 }
-
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(struct request_queue *q)
@@ -575,7 +575,6 @@ int blk_get_queue(struct request_queue *q)
 	return 1;
 }
-
 EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
@@ -774,7 +773,7 @@ rq_starved:
 	 */
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
-	
+
 	rq_init(q, rq);
 
 	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
@@ -888,7 +887,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	elv_requeue_request(q, rq);
 }
-
 EXPORT_SYMBOL(blk_requeue_request);
 
 /**
@@ -939,7 +937,6 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_start_queueing(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
-
 EXPORT_SYMBOL(blk_insert_request);
 
 /*
@@ -947,7 +944,7 @@ EXPORT_SYMBOL(blk_insert_request);
  * queue lock is held and interrupts disabled, as we muck with the
  * request queue list.
  */
-static inline void add_request(struct request_queue * q, struct request * req)
+static inline void add_request(struct request_queue *q, struct request *req)
 {
 	drive_stat_acct(req, 1);
@@ -957,7 +954,7 @@ static inline void add_request(struct request_queue * q, struct request * req)
 	 */
 	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
 }
-	
+
 /*
  * disk_round_stats()	- Round off the performance stats on a struct
  * disk_stats.
@@ -987,7 +984,6 @@ void disk_round_stats(struct gendisk *disk)
 	}
 	disk->stamp = now;
 }
-
 EXPORT_SYMBOL_GPL(disk_round_stats);
 
 /*
@@ -1017,7 +1013,6 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		freed_request(q, rw, priv);
 	}
 }
-
 EXPORT_SYMBOL_GPL(__blk_put_request);
 
 void blk_put_request(struct request *req)
@@ -1035,7 +1030,6 @@ void blk_put_request(struct request *req)
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
-
 EXPORT_SYMBOL(blk_put_request);
 
 void init_request_from_bio(struct request *req, struct bio *bio)
@@ -1096,53 +1090,53 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	el_ret = elv_merge(q, &req, bio);
 	switch (el_ret) {
-		case ELEVATOR_BACK_MERGE:
-			BUG_ON(!rq_mergeable(req));
-
-			if (!ll_back_merge_fn(q, req, bio))
-				break;
-
-			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
-
-			req->biotail->bi_next = bio;
-			req->biotail = bio;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_back_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
-
-		case ELEVATOR_FRONT_MERGE:
-			BUG_ON(!rq_mergeable(req));
-
-			if (!ll_front_merge_fn(q, req, bio))
-				break;
-
-			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
-
-			bio->bi_next = req->bio;
-			req->bio = bio;
-
-			/*
-			 * may not be valid. if the low level driver said
-			 * it didn't need a bounce buffer then it better
-			 * not touch req->buffer either...
-			 */
-			req->buffer = bio_data(bio);
-			req->current_nr_sectors = bio_cur_sectors(bio);
-			req->hard_cur_sectors = req->current_nr_sectors;
-			req->sector = req->hard_sector = bio->bi_sector;
-			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
-			req->ioprio = ioprio_best(req->ioprio, prio);
-			drive_stat_acct(req, 0);
-			if (!attempt_front_merge(q, req))
-				elv_merged_request(q, req, el_ret);
-			goto out;
-
-		/* ELV_NO_MERGE: elevator says don't/can't merge. */
-		default:
-			;
+	case ELEVATOR_BACK_MERGE:
+		BUG_ON(!rq_mergeable(req));
+
+		if (!ll_back_merge_fn(q, req, bio))
+			break;
+
+		blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
+		req->biotail->bi_next = bio;
+		req->biotail = bio;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_back_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
+
+	case ELEVATOR_FRONT_MERGE:
+		BUG_ON(!rq_mergeable(req));
+
+		if (!ll_front_merge_fn(q, req, bio))
+			break;
+
+		blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
+		bio->bi_next = req->bio;
+		req->bio = bio;
+
+		/*
+		 * may not be valid. if the low level driver said
+		 * it didn't need a bounce buffer then it better
+		 * not touch req->buffer either...
+		 */
+		req->buffer = bio_data(bio);
+		req->current_nr_sectors = bio_cur_sectors(bio);
+		req->hard_cur_sectors = req->current_nr_sectors;
+		req->sector = req->hard_sector = bio->bi_sector;
+		req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+		req->ioprio = ioprio_best(req->ioprio, prio);
+		drive_stat_acct(req, 0);
+		if (!attempt_front_merge(q, req))
+			elv_merged_request(q, req, el_ret);
+		goto out;
+
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
+	default:
+		;
 	}
 
 get_rq:
@@ -1350,7 +1344,7 @@ end_io:
 	}
 
 	if (unlikely(nr_sectors > q->max_hw_sectors)) {
-		printk("bio too big device %s (%u > %u)\n",
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
 			bdevname(bio->bi_bdev, b),
 			bio_sectors(bio),
 			q->max_hw_sectors);
@@ -1439,7 +1433,6 @@ void generic_make_request(struct bio *bio)
 	} while (bio);
 	current->bio_tail = NULL; /* deactivate */
 }
-
 EXPORT_SYMBOL(generic_make_request);
 
 /**
@@ -1480,13 +1473,12 @@ void submit_bio(int rw, struct bio *bio)
 			current->comm, task_pid_nr(current),
 				(rw & WRITE) ? "WRITE" : "READ",
 				(unsigned long long)bio->bi_sector,
-				bdevname(bio->bi_bdev,b));
+				bdevname(bio->bi_bdev, b));
 		}
 	}
 
 	generic_make_request(bio);
 }
-
 EXPORT_SYMBOL(submit_bio);
 
 /**
@@ -1518,9 +1510,8 @@ static int __end_that_request_first(struct request *req, int error,
 	if (!blk_pc_request(req))
 		req->errors = 0;
 
-	if (error) {
-		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
-			printk("end_request: I/O error, dev %s, sector %llu\n",
+	if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
+		printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
 	}
@@ -1554,9 +1545,9 @@ static int __end_that_request_first(struct request *req, int error,
 			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
 				blk_dump_rq_flags(req, "__end_that");
-				printk("%s: bio idx %d >= vcnt %d\n",
-						__FUNCTION__,
-						bio->bi_idx, bio->bi_vcnt);
+				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
+						__FUNCTION__, bio->bi_idx,
+						bio->bi_vcnt);
 				break;
 			}
@@ -1582,7 +1573,8 @@ static int __end_that_request_first(struct request *req, int error,
 		total_bytes += nbytes;
 		nr_bytes -= nbytes;
 
-		if ((bio = req->bio)) {
+		bio = req->bio;
+		if (bio) {
 			/*
 			 * end more in this run, or just return 'not-done'
 			 */
@@ -1626,15 +1618,16 @@ static void blk_done_softirq(struct softirq_action *h)
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
-		struct request *rq = list_entry(local_list.next, struct request, donelist);
+		struct request *rq;
 
+		rq = list_entry(local_list.next, struct request, donelist);
 		list_del_init(&rq->donelist);
 		rq->q->softirq_done_fn(rq);
 	}
 }
 
-static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action,
-			  void *hcpu)
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
 {
 	/*
 	 * If a CPU goes away, splice its entries to the current CPU
@@ -1676,7 +1669,7 @@ void blk_complete_request(struct request *req)
 	unsigned long flags;
 
 	BUG_ON(!req->q->softirq_done_fn);
-	
+
 	local_irq_save(flags);
 
 	cpu_list = &__get_cpu_var(blk_cpu_done);
@@ -1685,9 +1678,8 @@ void blk_complete_request(struct request *req)
 	local_irq_restore(flags);
 }
-
 EXPORT_SYMBOL(blk_complete_request);
 
 /*
  * queue lock must be held
  */
@@ -2002,7 +1994,6 @@ int kblockd_schedule_work(struct work_struct *work)
 {
 	return queue_work(kblockd_workqueue, work);
 }
-
 EXPORT_SYMBOL(kblockd_schedule_work);
 
 void kblockd_flush_work(struct work_struct *work)

block/blk-exec.c

@@ -101,5 +101,4 @@ int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
 	return err;
 }
-
 EXPORT_SYMBOL(blk_execute_rq);

block/blk-map.c

@@ -53,7 +53,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
 	 * direct dma. else, set up kernel bounce buffers
 	 */
 	uaddr = (unsigned long) ubuf;
-	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+	if (!(uaddr & queue_dma_alignment(q)) &&
+	    !(len & queue_dma_alignment(q)))
 		bio = bio_map_user(q, NULL, uaddr, len, reading);
 	else
 		bio = bio_copy_user(q, uaddr, len, reading);
@@ -144,7 +145,6 @@ unmap_rq:
 	blk_rq_unmap_user(bio);
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user);
 
 /**
@@ -179,7 +179,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	/* we don't allow misaligned data like bio_map_user() does. If the
 	 * user is using sg, they're expected to know the alignment constraints
 	 * and respect them accordingly */
-	bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ);
+	bio = bio_map_user_iov(q, NULL, iov, iov_count,
+				rq_data_dir(rq) == READ);
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
@@ -194,7 +195,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_user_iov);
 
 /**
@@ -227,7 +227,6 @@ int blk_rq_unmap_user(struct bio *bio)
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_rq_unmap_user);
 
 /**
@@ -260,5 +259,4 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	rq->buffer = rq->data = NULL;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_rq_map_kern);

block/blk-merge.c

@@ -32,7 +32,7 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect)
 	 * size, something has gone terribly wrong
 	 */
 	if (rq->nr_sectors < rq->current_nr_sectors) {
-		printk("blk: request botched\n");
+		printk(KERN_ERR "blk: request botched\n");
 		rq->nr_sectors = rq->current_nr_sectors;
 	}
 }
@@ -235,7 +235,6 @@ new_segment:
 	return nsegs;
 }
-
 EXPORT_SYMBOL(blk_rq_map_sg);
 
 static inline int ll_new_mergeable(struct request_queue *q,
@@ -305,8 +304,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 			blk_recount_segments(q, bio);
 		len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
-		if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
-		    !BIOVEC_VIRT_OVERSIZE(len)) {
+		if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio))
+		    && !BIOVEC_VIRT_OVERSIZE(len)) {
 			int mergeable = ll_new_mergeable(q, req, bio);
 
 			if (mergeable) {
@@ -321,7 +320,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 	return ll_new_hw_segment(q, req, bio);
 }
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req, 
+int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		      struct bio *bio)
 {
 	unsigned short max_sectors;
@@ -388,7 +387,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
 	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
-		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		int len = req->biotail->bi_hw_back_size +
+				next->bio->bi_hw_front_size;
 		/*
 		 * propagate the combined length to the end of the requests
 		 */

block/blk-settings.c

@@ -10,8 +10,10 @@
 
 #include "blk.h"
 
-unsigned long blk_max_low_pfn, blk_max_pfn;
+unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
+
+unsigned long blk_max_pfn;
 EXPORT_SYMBOL(blk_max_pfn);
 
 /**
@@ -29,7 +31,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
 {
 	q->prep_rq_fn = pfn;
 }
-
 EXPORT_SYMBOL(blk_queue_prep_rq);
 
 /**
@@ -52,14 +53,12 @@ void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
 {
 	q->merge_bvec_fn = mbfn;
 }
-
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
 	q->softirq_done_fn = fn;
 }
-
 EXPORT_SYMBOL(blk_queue_softirq_done);
 
 /**
@@ -84,7 +83,7 @@ EXPORT_SYMBOL(blk_queue_softirq_done);
  * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  * blk_queue_bounce() to create a buffer in normal memory.
  **/
-void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
+void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 {
 	/*
 	 * set defaults
@@ -93,7 +92,8 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
 	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
 	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
@@ -117,7 +117,6 @@ void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn)
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 }
-
 EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
@@ -133,7 +132,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 **/
 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 {
-	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+	unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
 	int dma = 0;
 
 	q->bounce_gfp = GFP_NOIO;
@@ -141,21 +140,20 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
 	/* Assume anything <= 4GB can be handled by IOMMU.
 	   Actually some IOMMUs can handle everything, but I don't
 	   know of a way to test this here. */
-	if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+	if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
 		dma = 1;
 	q->bounce_pfn = max_low_pfn;
 #else
-	if (bounce_pfn < blk_max_low_pfn)
+	if (b_pfn < blk_max_low_pfn)
 		dma = 1;
-	q->bounce_pfn = bounce_pfn;
+	q->bounce_pfn = b_pfn;
 #endif
 	if (dma) {
 		init_emergency_isa_pool();
 		q->bounce_gfp = GFP_NOIO | GFP_DMA;
-		q->bounce_pfn = bounce_pfn;
+		q->bounce_pfn = b_pfn;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
@@ -171,7 +169,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
 	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_sectors);
 	}
 
 	if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -181,7 +180,6 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 		q->max_hw_sectors = max_sectors;
 	}
 }
-
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
 /**
@@ -199,12 +197,12 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_phys_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_phys_segments);
 
 /**
@@ -223,12 +221,12 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
 	if (!max_segments) {
 		max_segments = 1;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_segments);
 	}
 
 	q->max_hw_segments = max_segments;
 }
-
 EXPORT_SYMBOL(blk_queue_max_hw_segments);
 
 /**
@@ -244,12 +242,12 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
 	if (max_size < PAGE_CACHE_SIZE) {
 		max_size = PAGE_CACHE_SIZE;
-		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+		printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
+							max_size);
 	}
 
 	q->max_segment_size = max_size;
 }
-
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
@@ -267,7 +265,6 @@ void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
 {
 	q->hardsect_size = size;
 }
-
 EXPORT_SYMBOL(blk_queue_hardsect_size);
 
 /*
@@ -283,17 +280,16 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
-	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
-	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
-	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
 		clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
 /**
@@ -332,7 +328,6 @@ int blk_queue_dma_drain(struct request_queue *q, void *buf,
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
 
 /**
@@ -344,12 +339,12 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
 	if (mask < PAGE_CACHE_SIZE - 1) {
 		mask = PAGE_CACHE_SIZE - 1;
-		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+		printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
+							mask);
 	}
 
 	q->seg_boundary_mask = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_segment_boundary);
 
 /**
@@ -366,7 +361,6 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
 	q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
 /**
@@ -390,7 +384,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 	if (mask > q->dma_alignment)
 		q->dma_alignment = mask;
 }
-
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 int __init blk_settings_init(void)

block/blk-sysfs.c

@@ -207,12 +207,13 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		  const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q = container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q;
 	ssize_t res;
 
 	if (!entry->store)
 		return -EIO;
+
+	q = container_of(kobj, struct request_queue, kobj);
 	mutex_lock(&q->sysfs_lock);
 	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
 		mutex_unlock(&q->sysfs_lock);

block/blk-tag.c

@@ -21,7 +21,6 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 {
 	return blk_map_queue_find_tag(q->queue_tags, tag);
 }
-
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
@@ -99,7 +98,6 @@ void blk_queue_free_tags(struct request_queue *q)
 {
 	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 }
-
 EXPORT_SYMBOL(blk_queue_free_tags);
 
 static int
@@ -185,7 +183,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		if (!tags)
 			goto fail;
 	} else if (q->queue_tags) {
-		if ((rc = blk_queue_resize_tags(q, depth)))
+		rc = blk_queue_resize_tags(q, depth);
+		if (rc)
 			return rc;
 		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
 		return 0;
@@ -203,7 +202,6 @@ fail:
 	kfree(tags);
 	return -ENOMEM;
 }
-
 EXPORT_SYMBOL(blk_queue_init_tags);
 
 /**
@@ -260,7 +258,6 @@ int blk_queue_resize_tags(struct request_queue *q, int new_depth)
 	kfree(tag_map);
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_resize_tags);
 
 /**
@@ -313,7 +310,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	clear_bit_unlock(tag, bqt->tag_map);
 	bqt->busy--;
 }
-
 EXPORT_SYMBOL(blk_queue_end_tag);
 
 /**
@@ -340,7 +336,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
-		printk(KERN_ERR 
+		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 			__FUNCTION__, rq,
 			rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
@@ -370,7 +366,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	bqt->busy++;
 	return 0;
 }
-
 EXPORT_SYMBOL(blk_queue_start_tag);
 
 /**
@@ -392,5 +387,4 @@ void blk_queue_invalidate_tags(struct request_queue *q)
 	list_for_each_safe(tmp, n, &q->tag_busy_list)
 		blk_requeue_request(q, list_entry_rq(tmp));
 }
-
 EXPORT_SYMBOL(blk_queue_invalidate_tags);