mirror of https://github.com/torvalds/linux.git
block: kill blk_queue_flush()
We don't have any drivers left using it, so kill it off. Update
documentation to use the newer blk_queue_write_cache().

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent f935a8ce0a
commit 2245f6de6c
@@ -71,7 +71,7 @@ requests that have a payload. For devices with volatile write caches the
 driver needs to tell the block layer that it supports flushing caches by
 doing:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH);
+	blk_queue_write_cache(sdkp->disk->queue, true, false);
 
 and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
 REQ_FLUSH requests with a payload are automatically turned into a sequence
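The updated text above still asks drivers to handle empty REQ_FLUSH requests in
their prep_fn/request_fn. A minimal sketch of what that can look like for a
legacy single-queue driver follows; it is not part of this commit, and the
mydrv_* names and helpers are hypothetical:

#include <linux/blkdev.h>

/*
 * Hypothetical helpers: a real driver would build and issue hardware
 * commands here and complete the request once the device finishes.
 */
static void mydrv_issue_cache_flush(struct request *rq) { }
static void mydrv_issue_rw(struct request *rq) { }

/*
 * Legacy single-queue request_fn sketch: an empty REQ_FLUSH request
 * carries no data and only asks the device to drain its volatile
 * write cache.
 */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if ((rq->cmd_flags & REQ_FLUSH) && !blk_rq_bytes(rq)) {
			mydrv_issue_cache_flush(rq);	/* empty flush */
			continue;
		}
		mydrv_issue_rw(rq);			/* regular read/write */
	}
}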
@@ -79,7 +79,7 @@ of an empty REQ_FLUSH request followed by the actual write by the block
 layer. For devices that also support the FUA bit the block layer needs
 to be told to pass through the REQ_FUA bit using:
 
-	blk_queue_flush(sdkp->disk->queue, REQ_FLUSH | REQ_FUA);
+	blk_queue_write_cache(sdkp->disk->queue, true, true);
 
 and the driver must handle write requests that have the REQ_FUA bit set
 in prep_fn/request_fn. If the FUA bit is not natively supported the block
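Honoring the REQ_FUA bit that the documentation mentions could look roughly
like the prep_fn sketch below; again the mydrv_* names and the per-command
fua field are assumptions, not any real driver's code:

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical per-command state; the fua flag is an assumption. */
struct mydrv_cmd {
	bool fua;
};

/*
 * prep_fn sketch: a write with REQ_FUA set must be on stable media
 * before it completes, so mark the command to bypass the write cache.
 */
static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
{
	struct mydrv_cmd *cmd = rq->special;	/* driver-private data, attached elsewhere */

	if (rq_data_dir(rq) == WRITE && (rq->cmd_flags & REQ_FUA))
		cmd->fua = true;

	return BLKPREP_OK;
}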
@@ -820,26 +820,6 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
-/**
- * blk_queue_flush - configure queue's cache flush capability
- * @q: the request queue for the device
- * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
- *
- * Tell block layer cache flush capability of @q. If it supports
- * flushing, REQ_FLUSH should be set. If it supports bypassing
- * write cache for individual writes, REQ_FUA should be set.
- */
-void blk_queue_flush(struct request_queue *q, unsigned int flush)
-{
-	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));
-
-	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
-		flush &= ~REQ_FUA;
-
-	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
-}
-EXPORT_SYMBOL_GPL(blk_queue_flush);
-
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
 	q->flush_not_queueable = !queueable;
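The removed kernel-doc records the old flag encoding: 0, REQ_FLUSH, or
REQ_FLUSH | REQ_FUA, with the WARN_ON_ONCE() rejecting FUA without FLUSH. A
rough sketch of expressing the same capabilities through the two booleans of
blk_queue_write_cache(), assuming a hypothetical driver-private capability
struct that is not part of this commit:

#include <linux/blkdev.h>

/* Hypothetical driver-private state; not kernel API. */
struct mydrv_device {
	struct gendisk *disk;
	struct {
		bool vwc;	/* device has a volatile write cache */
		bool fua;	/* device supports forced unit access */
	} caps;
};

static void mydrv_setup_cache(struct mydrv_device *dev)
{
	/* Old forms: blk_queue_flush(q, 0 / REQ_FLUSH / REQ_FLUSH | REQ_FUA) */
	bool wc  = dev->caps.vwc;
	bool fua = wc && dev->caps.fua;	/* FUA alone was rejected by the removed WARN_ON_ONCE() */

	blk_queue_write_cache(dev->disk->queue, wc, fua);
}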
@@ -1009,7 +1009,6 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);