block: kill off q->flush_flags
Now that we converted everything to the newer block write cache
interface, kill off the queue flush_flags and queueable flush entries.

Signed-off-by: Jens Axboe <axboe@fb.com>
commit c888a8f95a
parent c875a7093f
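What the conversion looks like from a driver's point of view: instead of assigning q->flush_flags directly, a driver advertises its cache capabilities through blk_queue_write_cache(), and readers test the queue flag bits. A minimal sketch using only the interfaces touched by this commit; the example_* helpers are illustrative and not part of the kernel tree.

#include <linux/blkdev.h>

/* Driver setup path (illustrative): the device has a volatile write
 * cache and honours FUA writes, so advertise both on the queue. This
 * replaces the old "q->flush_flags = REQ_FLUSH | REQ_FUA" assignment.
 */
static void example_setup_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}

/* Consumer path (illustrative): code that previously inspected
 * q->flush_flags now tests the queue flag bits instead.
 */
static bool example_needs_flush(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
}

static bool example_supports_fua(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_FUA, &q->queue_flags);
}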
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@ enum {
 static bool blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -822,7 +822,12 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -837,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 	spin_lock_irq(q->queue_lock);
-	if (wc) {
+	if (wc)
 		queue_flag_set(QUEUE_FLAG_WC, q);
-		q->flush_flags = REQ_FLUSH;
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua) {
-		if (wc)
-			q->flush_flags |= REQ_FUA;
+	if (fua)
 		queue_flag_set(QUEUE_FLAG_FUA, q);
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
 }
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 		vbd->type |= VDISK_REMOVABLE;
 
 	q = bdev_get_queue(bdev);
-	if (q && q->flush_flags)
+	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		vbd->flush_support = true;
 
 	if (q && blk_queue_secdiscard(q))
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1348,13 +1348,13 @@ static void dm_table_verify_integrity(struct dm_table *t)
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
-	unsigned flush = (*(unsigned *)data);
+	unsigned long flush = (unsigned long) data;
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && (q->flush_flags & flush);
+	return q && (q->queue_flags & flush);
 }
 
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
@@ -1375,7 +1375,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
 			return true;
 
 		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
 			return true;
 	}
 
@@ -1518,9 +1518,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
-	if (dm_table_supports_flush(t, REQ_FLUSH)) {
+	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
-		if (dm_table_supports_flush(t, REQ_FUA))
+		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
 			fua = true;
 	}
 	blk_queue_write_cache(q, wc, fua);
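The dm-table hunks above also change how the capability mask reaches the iterate_devices callback: rather than passing a pointer to a REQ_* value, the queue-flag mask is now encoded directly in the opaque void * cookie. A self-contained userspace sketch of that pattern, with ex_* names standing in for the dm/block types (not kernel code):

#include <stdio.h>

#define EX_QUEUE_FLAG_WC	23	/* write back caching */
#define EX_QUEUE_FLAG_FUA	24	/* device supports FUA writes */

/* Stand-in for a request queue: only the flags word matters here. */
struct ex_queue {
	unsigned long queue_flags;
};

/* Callback in the style of device_flush_capable(): the mask to test is
 * recovered from the opaque cookie by casting it back to unsigned long.
 */
static int ex_flush_capable(struct ex_queue *q, void *data)
{
	unsigned long flush = (unsigned long) data;

	return q && (q->queue_flags & flush);
}

int main(void)
{
	struct ex_queue q = { .queue_flags = 1UL << EX_QUEUE_FLAG_WC };

	/* The caller encodes the bit mask in the pointer, as dm-table now does. */
	printf("write cache: %d\n",
	       ex_flush_capable(&q, (void *)(1UL << EX_QUEUE_FLAG_WC)));
	printf("FUA: %d\n",
	       ex_flush_capable(&q, (void *)(1UL << EX_QUEUE_FLAG_FUA)));
	return 0;
}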
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1188,6 +1188,7 @@ ioerr:
 
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
+	struct request_queue *q = bdev_get_queue(rdev->bdev);
 	struct r5l_log *log;
 
 	if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 		return -ENOMEM;
 	log->rdev = rdev;
 
-	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
 
 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
 				       sizeof(rdev->mddev->uuid));
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -687,10 +687,10 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		 * Force writethrough using WRITE_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (q->flush_flags & REQ_FUA) {
+		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
 				rw = WRITE_FUA;
-			else if (!(q->flush_flags & REQ_FLUSH))
+			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 				rw = WRITE_FUA;
 			else
 				rw = WRITE;
@@ -836,7 +836,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
 	struct block_device *bd = ib_dev->ibd_bd;
 	struct request_queue *q = bdev_get_queue(bd);
 
-	return q->flush_flags & REQ_FLUSH;
+	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 }
 
 static const struct target_backend_ops iblock_ops = {
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -433,8 +433,6 @@ struct request_queue {
 	/*
 	 * for flush operations
 	 */
-	unsigned int		flush_flags;
-	unsigned int		flush_not_queueable:1;
 	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
@@ -493,6 +491,7 @@ struct request_queue {
 #define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC		23	/* Write back caching */
 #define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueuable */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1365,7 +1364,7 @@ static inline unsigned int block_size(struct block_device *bdev)
 
 static inline bool queue_flush_queueable(struct request_queue *q)
 {
-	return !q->flush_not_queueable;
+	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
 }
 
 typedef struct {struct page *v;} Sector;
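Finally, the flush_not_queueable bitfield survives only as the QUEUE_FLAG_FLUSH_NQ bit, updated under the queue lock in blk_queue_flush_queueable() and read back through queue_flush_queueable(). A hedged sketch of the caller-facing side; the example_* wrappers are illustrative only, not from the kernel tree.

#include <linux/blkdev.h>

/* Illustrative only: a driver whose hardware cannot accept a flush
 * while other commands are queued marks the queue accordingly; the
 * block layer reads the state back via the bit-test accessor instead
 * of the removed flush_not_queueable bitfield.
 */
static void example_mark_flush_not_queueable(struct request_queue *q)
{
	blk_queue_flush_queueable(q, false);
}

static bool example_can_queue_flush(struct request_queue *q)
{
	return queue_flush_queueable(q);
}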