blk: pass from_schedule to non-request unplug functions.
This will allow md/raid to know why the unplug was called, and it will be able to act accordingly - if !from_schedule it is safe to perform tasks which could themselves schedule.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 74018dc306
parent 2a7d5559b3
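What follows is an illustrative sketch (not part of this commit) of how a driver-side callback might use the new from_schedule flag. The driver, its workqueue, and the helpers my_dev, my_unplug, my_issue_pending and my_make_request are hypothetical; only the two-argument blk_plug_cb callback signature and the blk_check_plugged() helper added earlier in this series are taken from the tree.

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical driver state, for illustration only. */
struct my_dev {
	struct workqueue_struct	*wq;
	struct work_struct	unplug_work;
};

/* Hypothetical helper that issues queued I/O; may sleep. */
static void my_issue_pending(struct my_dev *dev)
{
	/* ... submit whatever the driver has queued for @dev ... */
}

/*
 * Unplug callback with the new signature.  When called because the task
 * is about to schedule (from_schedule == true) it must not block, so the
 * work is punted to a workqueue; otherwise it is safe to run work that
 * could itself schedule.  This sketch assumes the callback owns the
 * blk_plug_cb and frees it after running.
 */
static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct my_dev *dev = cb->data;

	if (from_schedule)
		queue_work(dev->wq, &dev->unplug_work);
	else
		my_issue_pending(dev);
	kfree(cb);
}

/* Registration in the request path: reuse or allocate a callback on the
 * current task's plug; if no plug is active, issue immediately. */
static void my_make_request(struct my_dev *dev)
{
	if (!blk_check_plugged(my_unplug, dev, sizeof(struct blk_plug_cb)))
		my_issue_pending(dev);
}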
block/blk-core.c
@@ -2909,7 +2909,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	}
 }
 
-static void flush_plug_callbacks(struct blk_plug *plug)
+static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
 {
 	LIST_HEAD(callbacks);
 
@@ -2921,7 +2921,7 @@ static void flush_plug_callbacks(struct blk_plug *plug)
 							  struct blk_plug_cb,
 							  list);
 		list_del(&cb->list);
-		cb->callback(cb);
+		cb->callback(cb, from_schedule);
 	}
 }
 
@@ -2961,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
 
-	flush_plug_callbacks(plug);
+	flush_plug_callbacks(plug, from_schedule);
 	if (list_empty(&plug->list))
 		return;
 
drivers/block/umem.c
@@ -513,7 +513,7 @@ static void process_page(unsigned long data)
 	}
 }
 
-static void mm_unplug(struct blk_plug_cb *cb)
+static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
 	struct cardinfo *card = cb->data;
 
drivers/md/md.c
@@ -498,7 +498,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-void md_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
 	struct mddev *mddev = cb->data;
 	md_wakeup_thread(mddev->thread);
drivers/md/md.h
@@ -629,7 +629,7 @@ extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 					struct mddev *mddev);
 extern void md_trim_bio(struct bio *bio, int offset, int size);
 
-extern void md_unplug(struct blk_plug_cb *cb);
+extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
 static inline int mddev_check_plugged(struct mddev *mddev)
 {
 	return !!blk_check_plugged(md_unplug, mddev,
include/linux/blkdev.h
@@ -923,7 +923,7 @@ struct blk_plug {
 #define BLK_MAX_REQUEST_COUNT 16
 
 struct blk_plug_cb;
-typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *);
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
 struct blk_plug_cb {
 	struct list_head list;
 	blk_plug_cb_fn callback;