/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>

struct bio_set;
struct bio;
struct bio_integrity_payload;
struct page;
struct block_device;
struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
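
/*
 * Illustrative sketch, not part of this header: a completion callback
 * matching bio_end_io_t.  The names my_end_io and my_ctx are
 * hypothetical; the owner typically stashes context in ->bi_private
 * and inspects ->bi_error once the bio completes:
 *
 *	static void my_end_io(struct bio *bio)
 *	{
 *		struct my_ctx *ctx = bio->bi_private;
 *
 *		if (bio->bi_error)
 *			pr_err("bio failed: %d\n", bio->bi_error);
 *		complete(&ctx->done);
 *		bio_put(bio);
 *	}
 */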

#ifdef CONFIG_BLOCK
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	int			bi_error;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use accessors.
						 */
	unsigned short		bi_flags;	/* status, command, etc */
	unsigned short		bi_ioprio;

	struct bvec_iter	bi_iter;

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;

	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
	 */
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;

	atomic_t		__bi_remaining;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Optional ioc and css associated with this bio. Put on bio
	 * release. Read comment on top of bio_associate_current().
	 */
	struct io_context	*bi_ioc;
	struct cgroup_subsys_state *bi_css;
#endif
	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};

	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */
	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[0];
};
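
/*
 * Illustrative sketch, not part of this header: the iterator helpers in
 * <linux/bio.h> walk ->bi_io_vec under control of ->bi_iter, e.g.
 *
 *	struct bio_vec bvec;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bvec, bio, iter)
 *		process_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
 *
 * where process_segment() is a hypothetical per-segment handler.
 */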

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
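
/*
 * Illustrative note: bio_reset() in block/bio.c conceptually does
 *
 *	memset(bio, 0, BIO_RESET_BYTES);
 *
 * so everything from bi_max_vecs onward (plus the flags called out
 * below) survives a reset.
 */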

/*
 * bio flags
 */
#define BIO_SEG_VALID	1	/* bi_phys_segments valid */
#define BIO_CLONED	2	/* doesn't own data */
#define BIO_BOUNCED	3	/* bio is a bounce bio */
#define BIO_USER_MAPPED 4	/* contains user pages */
#define BIO_NULL_MAPPED 5	/* contains invalid user pages */
#define BIO_QUIET	6	/* Make BIO Quiet */
#define BIO_CHAIN	7	/* chained bio, ->bi_remaining in effect */
#define BIO_REFFED	8	/* bio has elevated ->bi_cnt */
#define BIO_THROTTLED	9	/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	10
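
/*
 * These values are bit numbers, not masks.  A minimal sketch of testing
 * one (bio_flagged() in <linux/bio.h> wraps this pattern):
 *
 *	if (bio->bi_flags & (1U << BIO_CLONED))
 *		handle_shared_data(bio);	(hypothetical helper)
 */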

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(4)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
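
/*
 * Worked example: if the top four bits of ->bi_flags hold 3, then
 * BVEC_POOL_IDX(bio) returns 3 and the bvecs came from pool 2, since
 * the stored value is the pool index plus one; a result of 0 means
 * there are no bvecs to free.
 */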
#endif /* CONFIG_BLOCK */

/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24

enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* get zone information */
	REQ_OP_ZONE_REPORT	= 4,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,

	REQ_OP_LAST,
};

enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_NR_BITS,		/* stops here */
};

#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)
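
/*
 * Minimal usage sketch: with the encoding above, callers compare the
 * extracted op against the REQ_OP_* values directly, e.g.
 *
 *	if (bio_op(bio) == REQ_OP_DISCARD)
 *		return handle_discard(bio);	(hypothetical handler)
 */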

/* obsolete, don't use in new code */
#define bio_set_op_attrs(bio, op, op_flags) \
	((bio)->bi_opf |= (op | op_flags))
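
/*
 * Because bio_set_op_attrs() only ORs bits in and cannot clear a
 * previously set op, new code assigns ->bi_opf directly instead:
 *
 *	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 */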

static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
}
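
/*
 * Worked example: op_is_sync(REQ_OP_READ) is true (reads always count
 * as synchronous), op_is_sync(REQ_OP_WRITE) is false, and
 * op_is_sync(REQ_OP_WRITE | REQ_SYNC) is true.
 */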

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U
#define BLK_QC_T_SHIFT	16

static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num)
{
	return tag | (queue_num << BLK_QC_T_SHIFT);
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return cookie >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}
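
/*
 * Worked example: the cookie packs a tag in the low 16 bits and the
 * queue number above it, so the helpers round-trip:
 *
 *	blk_qc_t cookie = blk_tag_to_qc_t(5, 2);   cookie == 0x20005
 *	blk_qc_t_to_queue_num(cookie)              == 2
 *	blk_qc_t_to_tag(cookie)                    == 5
 */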
#endif /* __LINUX_BLK_TYPES_H */