block: Introduce the type blk_opf_t
Introduce the type blk_opf_t for the request operation and flags (REQ_OP_* and
REQ_*). This type will be used to improve documentation of the block layer code
and also to allow sparse to verify whether request flags are used correctly.

Cc: Christoph Hellwig <hch@lst.de>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Damien Le Moal <damien.lemoal@wdc.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-6-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 342a72a334
parent 2d9b02be73
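The sparse checking promised above relies on the kernel's __bitwise/__force annotations. As a rough, standalone illustration (not part of this patch; the demo_opf_t, DEMO_SYNC and demo_op_is_sync names are invented for the example), a __bitwise typedef makes sparse warn whenever a plain integer is mixed with the annotated type unless the conversion is forced explicitly:

/*
 * Minimal sketch of the __bitwise/__force mechanism. In the kernel these
 * macros come from <linux/types.h> and <linux/compiler_types.h>; they are
 * stubbed here so the snippet also builds with a plain C compiler, where
 * __CHECKER__ (defined only when sparse runs) is absent.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise demo_opf_t;

/* A flag value forced into the bitwise type, mirroring the REQ_* pattern. */
#define DEMO_SYNC	((__force demo_opf_t)(1U << 3))

static inline int demo_op_is_sync(demo_opf_t opf)
{
	/* Comparing against plain 0 is always allowed by sparse. */
	return (opf & DEMO_SYNC) != 0;
}

int main(void)
{
	demo_opf_t opf = DEMO_SYNC;	/* fine: the value already has the bitwise type */
	/* demo_opf_t bad = 8; */	/* sparse would warn: plain int assigned to restricted type */
	return demo_op_is_sync(opf) ? 0 : 1;
}

With blk_opf_t defined the same way, sparse can flag code that passes bare integers where REQ_OP_*/REQ_* values are expected, which is the point of the conversion below.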
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -240,6 +240,8 @@ static inline void bio_issue_init(struct bio_issue *issue,
 			((u64)size << BIO_ISSUE_SIZE_SHIFT));
 }
 
+typedef __u32 __bitwise blk_opf_t;
+
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
 
@@ -250,7 +252,7 @@ typedef unsigned int blk_qc_t;
 struct bio {
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
-	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
+	blk_opf_t		bi_opf;		/* bottom bits REQ_OP, top bits
 						 * req_flags.
 						 */
 	unsigned short		bi_flags;	/* BIO_* below */
@@ -338,7 +340,7 @@ enum {
 typedef __u32 __bitwise blk_mq_req_flags_t;
 
 #define REQ_OP_BITS	8
-#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
+#define REQ_OP_MASK	(__force blk_opf_t)((1 << REQ_OP_BITS) - 1)
 #define REQ_FLAG_BITS	24
 
 /**
@@ -356,35 +358,35 @@ typedef __u32 __bitwise blk_mq_req_flags_t;
  */
 enum req_op {
 	/* read sectors from the device */
-	REQ_OP_READ		= 0,
+	REQ_OP_READ		= (__force blk_opf_t)0,
 	/* write sectors to the device */
-	REQ_OP_WRITE		= 1,
+	REQ_OP_WRITE		= (__force blk_opf_t)1,
 	/* flush the volatile write cache */
-	REQ_OP_FLUSH		= 2,
+	REQ_OP_FLUSH		= (__force blk_opf_t)2,
 	/* discard sectors */
-	REQ_OP_DISCARD		= 3,
+	REQ_OP_DISCARD		= (__force blk_opf_t)3,
 	/* securely erase sectors */
-	REQ_OP_SECURE_ERASE	= 5,
+	REQ_OP_SECURE_ERASE	= (__force blk_opf_t)5,
 	/* write the zero filled sector many times */
-	REQ_OP_WRITE_ZEROES	= 9,
+	REQ_OP_WRITE_ZEROES	= (__force blk_opf_t)9,
 	/* Open a zone */
-	REQ_OP_ZONE_OPEN	= 10,
+	REQ_OP_ZONE_OPEN	= (__force blk_opf_t)10,
 	/* Close a zone */
-	REQ_OP_ZONE_CLOSE	= 11,
+	REQ_OP_ZONE_CLOSE	= (__force blk_opf_t)11,
 	/* Transition a zone to full */
-	REQ_OP_ZONE_FINISH	= 12,
+	REQ_OP_ZONE_FINISH	= (__force blk_opf_t)12,
 	/* write data at the current zone write pointer */
-	REQ_OP_ZONE_APPEND	= 13,
+	REQ_OP_ZONE_APPEND	= (__force blk_opf_t)13,
 	/* reset a zone write pointer */
-	REQ_OP_ZONE_RESET	= 15,
+	REQ_OP_ZONE_RESET	= (__force blk_opf_t)15,
 	/* reset all the zone present on the device */
-	REQ_OP_ZONE_RESET_ALL	= 17,
+	REQ_OP_ZONE_RESET_ALL	= (__force blk_opf_t)17,
 
 	/* Driver private requests */
-	REQ_OP_DRV_IN		= 34,
-	REQ_OP_DRV_OUT		= 35,
+	REQ_OP_DRV_IN		= (__force blk_opf_t)34,
+	REQ_OP_DRV_OUT		= (__force blk_opf_t)35,
 
-	REQ_OP_LAST,
+	REQ_OP_LAST		= (__force blk_opf_t)36,
 };
 
 enum req_flag_bits {
@@ -425,28 +427,31 @@ enum req_flag_bits {
 	__REQ_NR_BITS,		/* stops here */
 };
 
-#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
-#define REQ_SYNC		(1ULL << __REQ_SYNC)
-#define REQ_META		(1ULL << __REQ_META)
-#define REQ_PRIO		(1ULL << __REQ_PRIO)
-#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
-#define REQ_IDLE		(1ULL << __REQ_IDLE)
-#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
-#define REQ_FUA			(1ULL << __REQ_FUA)
-#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
-#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
-#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
-#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
+#define REQ_FAILFAST_DEV	\
+			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	\
+			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	\
+			(__force blk_opf_t)(1ULL << __REQ_FAILFAST_DRIVER)
+#define REQ_SYNC	(__force blk_opf_t)(1ULL << __REQ_SYNC)
+#define REQ_META	(__force blk_opf_t)(1ULL << __REQ_META)
+#define REQ_PRIO	(__force blk_opf_t)(1ULL << __REQ_PRIO)
+#define REQ_NOMERGE	(__force blk_opf_t)(1ULL << __REQ_NOMERGE)
+#define REQ_IDLE	(__force blk_opf_t)(1ULL << __REQ_IDLE)
+#define REQ_INTEGRITY	(__force blk_opf_t)(1ULL << __REQ_INTEGRITY)
+#define REQ_FUA		(__force blk_opf_t)(1ULL << __REQ_FUA)
+#define REQ_PREFLUSH	(__force blk_opf_t)(1ULL << __REQ_PREFLUSH)
+#define REQ_RAHEAD	(__force blk_opf_t)(1ULL << __REQ_RAHEAD)
+#define REQ_BACKGROUND	(__force blk_opf_t)(1ULL << __REQ_BACKGROUND)
+#define REQ_NOWAIT	(__force blk_opf_t)(1ULL << __REQ_NOWAIT)
+#define REQ_CGROUP_PUNT	(__force blk_opf_t)(1ULL << __REQ_CGROUP_PUNT)
 
-#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
-#define REQ_POLLED		(1ULL << __REQ_POLLED)
-#define REQ_ALLOC_CACHE		(1ULL << __REQ_ALLOC_CACHE)
+#define REQ_NOUNMAP	(__force blk_opf_t)(1ULL << __REQ_NOUNMAP)
+#define REQ_POLLED	(__force blk_opf_t)(1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE	(__force blk_opf_t)(1ULL << __REQ_ALLOC_CACHE)
 
-#define REQ_DRV			(1ULL << __REQ_DRV)
-#define REQ_SWAP		(1ULL << __REQ_SWAP)
+#define REQ_DRV		(__force blk_opf_t)(1ULL << __REQ_DRV)
+#define REQ_SWAP	(__force blk_opf_t)(1ULL << __REQ_SWAP)
 
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -469,22 +474,22 @@ static inline enum req_op bio_op(const struct bio *bio)
 }
 
 /* obsolete, don't use in new code */
-static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
-				    unsigned op_flags)
+static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
+				    blk_opf_t op_flags)
 {
 	bio->bi_opf = op | op_flags;
 }
 
-static inline bool op_is_write(unsigned int op)
+static inline bool op_is_write(blk_opf_t op)
 {
-	return (op & 1);
+	return !!(op & (__force blk_opf_t)1);
 }
 
 /*
  * Check if the bio or request is one that needs special treatment in the
  * flush state machine.
  */
-static inline bool op_is_flush(unsigned int op)
+static inline bool op_is_flush(blk_opf_t op)
 {
 	return op & (REQ_FUA | REQ_PREFLUSH);
 }
@@ -494,13 +499,13 @@ static inline bool op_is_flush(unsigned int op)
  * PREFLUSH flag. Other operations may be marked as synchronous using the
  * REQ_SYNC flag.
  */
-static inline bool op_is_sync(unsigned int op)
+static inline bool op_is_sync(blk_opf_t op)
 {
 	return (op & REQ_OP_MASK) == REQ_OP_READ ||
 		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
 }
 
-static inline bool op_is_discard(unsigned int op)
+static inline bool op_is_discard(blk_opf_t op)
 {
 	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
 }