Merge remote-tracking branch 'jens/for-4.7/core' into dm-4.7

Needed in order to update the DM thinp code to use the new async
__blkdev_issue_discard() interface.
Mike Snitzer 2016-05-05 15:21:14 -04:00
commit ef1d88ced1
15 changed files with 208 additions and 163 deletions
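The async interface referenced above returns the tail of a chained series of discard bios through *biop instead of waiting on them; the reworked blkdev_issue_discard() further down in this merge is itself the reference caller. Below is a minimal sketch of that pattern — the function name, GFP choice, and flags are illustrative, not part of the merge:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Illustrative caller of the new async interface: build the whole
 * discard chain under a plug without blocking, then decide how to
 * complete it.  A DM target could instead chain the returned bio to
 * one of its own bios rather than calling submit_bio_wait().
 */
static int example_discard(struct block_device *bdev, sector_t sector,
			   sector_t nr_sects)
{
	int type = REQ_WRITE | REQ_DISCARD;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
				     type, &bio);
	if (!ret && bio) {
		/* Waits for every bio chained ahead of this one, too. */
		ret = submit_bio_wait(type, bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}
	blk_finish_plug(&plug);

	return ret;
}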

Documentation/block/queue-sysfs.txt

@@ -141,6 +141,15 @@ control of this block device to that new IO scheduler. Note that writing
an IO scheduler name to this file will attempt to load that IO scheduler
module, if it isn't already present in the system.
write_cache (RW)
----------------
When read, this file will display whether the device has write back
caching enabled or not. It will return "write back" for the former
case, and "write through" for the latter. Writing to this file can
change the kernel's view of the device, but it doesn't alter the
device state. This means that it might not be safe to toggle the
setting from "write back" to "write through", since that will also
eliminate cache flushes issued by the kernel.
Jens Axboe <jens.axboe@oracle.com>, February 2009

block/bio.c

@@ -311,17 +311,6 @@ static void bio_chain_endio(struct bio *bio)
bio_endio(__bio_chain_endio(bio));
}
/*
* Increment chain count for the bio. Make sure the CHAIN flag update
* is visible before the raised count.
*/
static inline void bio_inc_remaining(struct bio *bio)
{
bio_set_flag(bio, BIO_CHAIN);
smp_mb__before_atomic();
atomic_inc(&bio->__bi_remaining);
}
/**
* bio_chain - chain bio completions
* @bio: the target bio

block/blk-core.c

@@ -1523,6 +1523,7 @@ EXPORT_SYMBOL(blk_put_request);
* blk_add_request_payload - add a payload to a request
* @rq: request to update
* @page: page backing the payload
* @offset: offset in page
* @len: length of the payload.
*
* This allows to later add a payload to an already submitted request by
@@ -1533,12 +1534,12 @@ EXPORT_SYMBOL(blk_put_request);
* discard requests should ever use it.
*/
void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len)
int offset, unsigned int len)
{
struct bio *bio = rq->bio;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
bio->bi_io_vec->bv_offset = offset;
bio->bi_io_vec->bv_len = len;
bio->bi_iter.bi_size = len;
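For callers of the widened helper, the extra argument is simply the byte offset of the payload inside the page; the skd and sd hunks further down pass 0. A hypothetical driver-side sketch (helper name is illustrative):

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical helper: attach a zeroed one-page payload to @rq. */
static int example_attach_payload(struct request *rq, unsigned int len)
{
	struct page *page = alloc_page(GFP_ATOMIC | __GFP_ZERO);

	if (!page)
		return -ENOMEM;

	/* Third argument is the new byte offset within @page. */
	blk_add_request_payload(rq, page, 0, len);
	return 0;
}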

block/blk-lib.c

@@ -9,23 +9,81 @@
#include "blk.h"
struct bio_batch {
atomic_t done;
int error;
struct completion *wait;
};
static void bio_batch_end_io(struct bio *bio)
static struct bio *next_bio(struct bio *bio, int rw, unsigned int nr_pages,
gfp_t gfp)
{
struct bio_batch *bb = bio->bi_private;
struct bio *new = bio_alloc(gfp, nr_pages);
if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
bb->error = bio->bi_error;
if (atomic_dec_and_test(&bb->done))
complete(bb->wait);
bio_put(bio);
if (bio) {
bio_chain(bio, new);
submit_bio(rw, bio);
}
return new;
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop)
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
unsigned int granularity;
int alignment;
if (!q)
return -ENXIO;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
if ((type & REQ_SECURE) && !blk_queue_secdiscard(q))
return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
/* Make sure bi_size doesn't overflow */
req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
/**
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
tmp = end_sect;
if (req_sects < nr_sects &&
sector_div(tmp, granularity) != alignment) {
end_sect = end_sect - alignment;
sector_div(end_sect, granularity);
end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
bio = next_bio(bio, type, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
/*
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
* us to schedule out to avoid softlocking if preempt
* is disabled.
*/
cond_resched();
}
*biop = bio;
return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
@@ -40,92 +98,24 @@ static void bio_batch_end_io(struct bio *bio)
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = REQ_WRITE | REQ_DISCARD;
unsigned int granularity;
int alignment;
struct bio_batch bb;
struct bio *bio;
int ret = 0;
struct bio *bio = NULL;
struct blk_plug plug;
int ret;
if (!q)
return -ENXIO;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
if (flags & BLKDEV_DISCARD_SECURE) {
if (!blk_queue_secdiscard(q))
return -EOPNOTSUPP;
if (flags & BLKDEV_DISCARD_SECURE)
type |= REQ_SECURE;
}
atomic_set(&bb.done, 1);
bb.error = 0;
bb.wait = &wait;
blk_start_plug(&plug);
while (nr_sects) {
unsigned int req_sects;
sector_t end_sect, tmp;
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
/* Make sure bi_size doesn't overflow */
req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
/*
* If splitting a request, and the next starting sector would be
* misaligned, stop the discard at the previous aligned sector.
*/
end_sect = sector + req_sects;
tmp = end_sect;
if (req_sects < nr_sects &&
sector_div(tmp, granularity) != alignment) {
end_sect = end_sect - alignment;
sector_div(end_sect, granularity);
end_sect = end_sect * granularity + alignment;
req_sects = end_sect - sector;
}
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
bio->bi_iter.bi_size = req_sects << 9;
nr_sects -= req_sects;
sector = end_sect;
atomic_inc(&bb.done);
submit_bio(type, bio);
/*
* We can loop for a long time in here, if someone does
* full device discards (like mkfs). Be nice and allow
* us to schedule out to avoid softlocking if preempt
* is disabled.
*/
cond_resched();
ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, type,
&bio);
if (!ret && bio) {
ret = submit_bio_wait(type, bio);
if (ret == -EOPNOTSUPP)
ret = 0;
}
blk_finish_plug(&plug);
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (bb.error)
return bb.error;
return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
@@ -145,11 +135,9 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask,
struct page *page)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
unsigned int max_write_same_sectors;
struct bio_batch bb;
struct bio *bio;
struct bio *bio = NULL;
int ret = 0;
if (!q)
@@ -158,21 +146,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
max_write_same_sectors = UINT_MAX >> 9;
atomic_set(&bb.done, 1);
bb.error = 0;
bb.wait = &wait;
while (nr_sects) {
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
ret = -ENOMEM;
break;
}
bio = next_bio(bio, REQ_WRITE | REQ_WRITE_SAME, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_batch_end_io;
bio->bi_bdev = bdev;
bio->bi_private = &bb;
bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0;
@@ -186,18 +163,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
bio->bi_iter.bi_size = nr_sects << 9;
nr_sects = 0;
}
atomic_inc(&bb.done);
submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
}
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (bb.error)
return bb.error;
return ret;
if (bio)
ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio);
return ret != -EOPNOTSUPP ? ret : 0;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
@@ -216,28 +186,15 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
struct bio *bio;
struct bio_batch bb;
struct bio *bio = NULL;
unsigned int sz;
DECLARE_COMPLETION_ONSTACK(wait);
atomic_set(&bb.done, 1);
bb.error = 0;
bb.wait = &wait;
ret = 0;
while (nr_sects != 0) {
bio = bio_alloc(gfp_mask,
min(nr_sects, (sector_t)BIO_MAX_PAGES));
if (!bio) {
ret = -ENOMEM;
break;
}
bio = next_bio(bio, WRITE,
min(nr_sects, (sector_t)BIO_MAX_PAGES),
gfp_mask);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
bio->bi_private = &bb;
while (nr_sects != 0) {
sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects);
@@ -247,18 +204,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if (ret < (sz << 9))
break;
}
ret = 0;
atomic_inc(&bb.done);
submit_bio(WRITE, bio);
}
/* Wait for bios in-flight */
if (!atomic_dec_and_test(&bb.done))
wait_for_completion_io(&wait);
if (bb.error)
return bb.error;
return ret;
if (bio)
return submit_bio_wait(WRITE, bio);
return 0;
}
/**

block/blk-mq-tag.c

@@ -474,6 +474,18 @@ void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
}
EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
{
int i;
for (i = 0; i < tagset->nr_hw_queues; i++) {
if (tagset->tags && tagset->tags[i])
blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
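blk_mq_tagset_busy_iter() is the tag-set-wide companion to blk_mq_all_tag_busy_iter(): it walks every in-flight request on every hardware queue owned by a driver's tag set. A hedged sketch of a caller follows; the callback and function names are made up for illustration:

#include <linux/blk-mq.h>

/* Callback invoked once per busy (in-flight) request. */
static void example_count_busy(struct request *rq, void *data, bool reserved)
{
	unsigned int *inflight = data;

	(*inflight)++;
}

/* Count requests currently in flight across the whole tag set. */
static unsigned int example_count_inflight(struct blk_mq_tag_set *set)
{
	unsigned int inflight = 0;

	blk_mq_tagset_busy_iter(set, example_count_busy, &inflight);
	return inflight;
}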

block/blk-mq.c

@@ -1122,7 +1122,6 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
init_request_from_bio(rq, bio);
if (blk_do_io_stat(rq))
blk_account_io_start(rq, 1);
}

block/blk-settings.c

@@ -846,6 +846,32 @@ void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
/**
* blk_queue_write_cache - configure queue's write cache
* @q: the request queue for the device
* @wc: write back cache on or off
* @fua: device supports FUA writes, if true
*
* Tell the block layer about the write cache of @q.
*/
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
spin_lock_irq(q->queue_lock);
if (wc) {
queue_flag_set(QUEUE_FLAG_WC, q);
q->flush_flags = REQ_FLUSH;
} else
queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua) {
if (wc)
q->flush_flags |= REQ_FUA;
queue_flag_set(QUEUE_FLAG_FUA, q);
} else
queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
static int __init blk_settings_init(void)
{
blk_max_low_pfn = max_low_pfn - 1;
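A driver uses this helper to tell the block layer about a volatile write back cache and FUA support; the write_cache sysfs attribute documented at the top of this merge reflects whatever is set here. A minimal, hypothetical probe-time sketch:

#include <linux/blkdev.h>

/*
 * Hypothetical probe-time call: the device has a volatile write back
 * cache and honours FUA writes, so the queue gets REQ_FLUSH|REQ_FUA
 * and the write_cache sysfs attribute reports "write back".
 */
static void example_setup_write_cache(struct request_queue *q)
{
	blk_queue_write_cache(q, true, true);
}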

block/blk-sysfs.c

@@ -347,6 +347,38 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return sprintf(page, "write back\n");
return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
size_t count)
{
int set = -1;
if (!strncmp(page, "write back", 10))
set = 1;
else if (!strncmp(page, "write through", 13) ||
!strncmp(page, "none", 4))
set = 0;
if (set == -1)
return -EINVAL;
spin_lock_irq(q->queue_lock);
if (set)
queue_flag_set(QUEUE_FLAG_WC, q);
else
queue_flag_clear(QUEUE_FLAG_WC, q);
spin_unlock_irq(q->queue_lock);
return count;
}
static struct queue_sysfs_entry queue_requests_entry = {
.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
.show = queue_requests_show,
@@ -478,6 +510,12 @@ static struct queue_sysfs_entry queue_poll_entry = {
.store = queue_poll_store,
};
static struct queue_sysfs_entry queue_wc_entry = {
.attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
.show = queue_wc_show,
.store = queue_wc_store,
};
static struct attribute *default_attrs[] = {
&queue_requests_entry.attr,
&queue_ra_entry.attr,
@@ -503,6 +541,7 @@ static struct attribute *default_attrs[] = {
&queue_iostats_entry.attr,
&queue_random_entry.attr,
&queue_poll_entry.attr,
&queue_wc_entry.attr,
NULL,
};

drivers/block/skd_main.c

@@ -562,7 +562,7 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
put_unaligned_be32(count, &buf[16]);
req = skreq->req;
blk_add_request_payload(req, page, len);
blk_add_request_payload(req, page, 0, len);
}
static void skd_request_fn_not_online(struct request_queue *q);

drivers/scsi/sd.c

@@ -779,7 +779,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
* discarded on disk. This allows us to report completion on the full
* amount of blocks described by the request.
*/
blk_add_request_payload(rq, page, len);
blk_add_request_payload(rq, page, 0, len);
ret = scsi_init_io(cmd);
rq->__data_len = nr_bytes;

include/linux/bio.h

@@ -702,6 +702,17 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
return bio;
}
/*
* Increment chain count for the bio. Make sure the CHAIN flag update
* is visible before the raised count.
*/
static inline void bio_inc_remaining(struct bio *bio)
{
bio_set_flag(bio, BIO_CHAIN);
smp_mb__before_atomic();
atomic_inc(&bio->__bi_remaining);
}
/*
* bio_set is used to allow other portions of the IO system to
* allocate their own private memory pools for bio and iovec structures.
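Moving bio_inc_remaining() into the header makes it usable by stacking drivers such as the DM thinp code named in the commit message. A hedged sketch of the intended pairing, with illustrative function names: each call takes one extra completion reference on the bio, which must later be dropped by a matching bio_endio().

#include <linux/bio.h>

/* Take an extra completion reference before fanning out more work. */
static void example_start_extra_work(struct bio *parent)
{
	/* parent will not complete until a matching bio_endio() runs. */
	bio_inc_remaining(parent);
}

/* Called when that extra piece of work finishes. */
static void example_extra_work_done(struct bio *parent)
{
	bio_endio(parent);	/* drops the reference taken above */
}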

include/linux/blk-mq.h

@@ -240,6 +240,8 @@ void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_mq_freeze_queue_start(struct request_queue *q);

include/linux/blk_types.h

@@ -208,7 +208,7 @@ enum rq_flag_bits {
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
REQ_SECURE | REQ_INTEGRITY)
REQ_SECURE | REQ_INTEGRITY | REQ_NOMERGE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)

include/linux/blkdev.h

@@ -491,6 +491,8 @@ struct request_queue {
#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 23 /* Write back caching */
#define QUEUE_FLAG_FUA 24 /* device supports FUA writes */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -779,7 +781,7 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *,
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
unsigned int len);
int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -1009,6 +1011,7 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
@@ -1128,6 +1131,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,

mm/page-writeback.c

@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (gdtc->dirty > gdtc->bg_thresh)
return true;
if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
if (wb_stat(wb, WB_RECLAIMABLE) >
wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
return true;
if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
if (mdtc->dirty > mdtc->bg_thresh)
return true;
if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
if (wb_stat(wb, WB_RECLAIMABLE) >
wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
return true;
}