Mirror of https://github.com/torvalds/linux.git (synced 2024-11-10 14:11:52 +00:00)
Merge branch 'for-3.15/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
 "This is the pull request for the core block IO bits for the 3.15
  kernel. It's a smaller round this time, it contains:

   - Various little blk-mq fixes and additions from Christoph and
     myself.

   - Cleanup of the IPI usage from the block layer, and associated
     helper code. From Frederic Weisbecker and Jan Kara.

   - Duplicate code cleanup in bio-integrity from Gu Zheng. This will
     give you a merge conflict, but that should be easy to resolve.

   - blk-mq notify spinlock fix for RT from Mike Galbraith.

   - A blktrace partial accounting bug fix from Roman Pen.

   - Missing REQ_SYNC detection fix for blk-mq from Shaohua Li"

* 'for-3.15/core' of git://git.kernel.dk/linux-block: (25 commits)
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  blk-mq: don't dump CPU -> hw queue map on driver load
  blk-mq: fix wrong usage of hctx->state vs hctx->flags
  blk-mq: allow blk_mq_init_commands() to return failure
  block: remove old blk_iopoll_enabled variable
  blktrace: fix accounting of partially completed requests
  smp: Rename __smp_call_function_single() to smp_call_function_single_async()
  smp: Remove wait argument from __smp_call_function_single()
  watchdog: Simplify a little the IPI call
  smp: Move __smp_call_function_single() below its safe version
  smp: Consolidate the various smp_call_function_single() declensions
  smp: Teach __smp_call_function_single() to check for offline cpus
  smp: Remove unused list_head from csd
  smp: Iterate functions through llist_for_each_entry_safe()
  block: Stop abusing rq->csd.list in blk-softirq
  block: Remove useless IPI struct initialization
  ...
This commit is contained in: commit 7a48837732
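The IPI-helper cleanup listed above replaces __smp_call_function_single(cpu, csd, wait) with the asynchronous smp_call_function_single_async(cpu, csd) throughout the tree (blk-mq completion, blk-softirq, cpuidle coupled idle, the scheduler's hrtick). The following is a minimal sketch of the resulting call pattern, not code from the patch itself; the call_single_data layout and the new helper come from include/linux/smp.h as changed below, while my_request, my_done_remote() and my_target_cpu are hypothetical placeholder names.

/* Sketch only: illustrates the new API, assuming hypothetical my_* names. */
#include <linux/smp.h>
#include <linux/printk.h>

struct my_request {
	struct call_single_data csd;	/* must stay valid until the IPI has run */
	int cookie;
};

/* Runs in IPI context on the target CPU; keep it short and non-blocking. */
static void my_done_remote(void *info)
{
	struct my_request *rq = info;

	pr_debug("completed request %d\n", rq->cookie);
}

static void my_complete_on(int my_target_cpu, struct my_request *rq)
{
	rq->csd.func = my_done_remote;
	rq->csd.info = rq;
	rq->csd.flags = 0;

	/*
	 * Old API: __smp_call_function_single(my_target_cpu, &rq->csd, 0);
	 * New API: asynchronous, returns -ENXIO if the CPU is offline.
	 */
	smp_call_function_single_async(my_target_cpu, &rq->csd);
}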
@@ -336,7 +336,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
 * under queue_lock. If it's not pointing to @blkg now, it never
 * will. Hint assignment itself can race safely.
 */
-if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
+if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
 rcu_assign_pointer(blkcg->blkg_hint, NULL);

 /*
@@ -2353,7 +2353,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 if (!req->bio)
 return false;

-trace_block_rq_complete(req->q, req);
+trace_block_rq_complete(req->q, req, nr_bytes);

 /*
 * For fs requests, rq is just carrier of independent bio's
@@ -68,7 +68,7 @@ static void ioc_destroy_icq(struct io_cq *icq)
 * under queue_lock. If it's not pointing to @icq now, it never
 * will. Hint assignment itself can race safely.
 */
-if (rcu_dereference_raw(ioc->icq_hint) == icq)
+if (rcu_access_pointer(ioc->icq_hint) == icq)
 rcu_assign_pointer(ioc->icq_hint, NULL);

 ioc_exit_icq(icq);
@@ -14,9 +14,6 @@

 #include "blk.h"

-int blk_iopoll_enabled = 1;
-EXPORT_SYMBOL(blk_iopoll_enabled);
-
 static unsigned int blk_iopoll_budget __read_mostly = 256;

 static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
@@ -9,15 +9,6 @@
 #include "blk.h"
 #include "blk-mq.h"

-static void show_map(unsigned int *map, unsigned int nr)
-{
-int i;
-
-pr_info("blk-mq: CPU -> queue map\n");
-for_each_online_cpu(i)
-pr_info("  CPU%2u -> Queue %u\n", i, map[i]);
-}
-
 static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues,
 const int cpu)
 {
@@ -85,7 +76,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues)
 map[i] = map[first_sibling];
 }

-show_map(map, nr_cpus);
 free_cpumask_var(cpus);
 return 0;
 }
@@ -244,6 +244,32 @@ static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
 return blk_mq_tag_sysfs_show(hctx->tags, page);
 }

+static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
+{
+unsigned int i, queue_num, first = 1;
+ssize_t ret = 0;
+
+blk_mq_disable_hotplug();
+
+for_each_online_cpu(i) {
+queue_num = hctx->queue->mq_map[i];
+if (queue_num != hctx->queue_num)
+continue;
+
+if (first)
+ret += sprintf(ret + page, "%u", i);
+else
+ret += sprintf(ret + page, ", %u", i);
+
+first = 0;
+}
+
+blk_mq_enable_hotplug();
+
+ret += sprintf(ret + page, "\n");
+return ret;
+}
+
 static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
 .attr = {.name = "dispatched", .mode = S_IRUGO },
 .show = blk_mq_sysfs_dispatched_show,
@@ -294,6 +320,10 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
 .attr = {.name = "tags", .mode = S_IRUGO },
 .show = blk_mq_hw_sysfs_tags_show,
 };
+static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
+.attr = {.name = "cpu_list", .mode = S_IRUGO },
+.show = blk_mq_hw_sysfs_cpus_show,
+};

 static struct attribute *default_hw_ctx_attrs[] = {
 &blk_mq_hw_sysfs_queued.attr,
@@ -302,6 +332,7 @@ static struct attribute *default_hw_ctx_attrs[] = {
 &blk_mq_hw_sysfs_pending.attr,
 &blk_mq_hw_sysfs_ipi.attr,
 &blk_mq_hw_sysfs_tags.attr,
+&blk_mq_hw_sysfs_cpus.attr,
 NULL,
 };

@@ -320,7 +320,7 @@ void __blk_mq_complete_request(struct request *rq)
 rq->csd.func = __blk_mq_complete_request_remote;
 rq->csd.info = rq;
 rq->csd.flags = 0;
-__smp_call_function_single(ctx->cpu, &rq->csd, 0);
+smp_call_function_single_async(ctx->cpu, &rq->csd);
 } else {
 rq->q->softirq_done_fn(rq);
 }
@@ -514,7 +514,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 LIST_HEAD(rq_list);
 int bit, queued;

-if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 return;

 hctx->run++;
@@ -603,7 +603,7 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)

 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
-if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->flags)))
+if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
 return;

 if (!async)
@@ -623,7 +623,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
 queue_for_each_hw_ctx(q, hctx, i) {
 if ((!blk_mq_hctx_has_pending(hctx) &&
 list_empty_careful(&hctx->dispatch)) ||
-test_bit(BLK_MQ_S_STOPPED, &hctx->flags))
+test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 continue;

 blk_mq_run_hw_queue(hctx, async);
@@ -994,8 +994,46 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
 blk_mq_put_ctx(ctx);
 }

-static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
-void (*init)(void *, struct blk_mq_hw_ctx *,
+static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
+int (*init)(void *, struct blk_mq_hw_ctx *,
+struct request *, unsigned int),
+void *data)
+{
+unsigned int i;
+int ret = 0;
+
+for (i = 0; i < hctx->queue_depth; i++) {
+struct request *rq = hctx->rqs[i];
+
+ret = init(data, hctx, rq, i);
+if (ret)
+break;
+}
+
+return ret;
+}
+
+int blk_mq_init_commands(struct request_queue *q,
+int (*init)(void *, struct blk_mq_hw_ctx *,
+struct request *, unsigned int),
+void *data)
+{
+struct blk_mq_hw_ctx *hctx;
+unsigned int i;
+int ret = 0;
+
+queue_for_each_hw_ctx(q, hctx, i) {
+ret = blk_mq_init_hw_commands(hctx, init, data);
+if (ret)
+break;
+}
+
+return ret;
+}
+EXPORT_SYMBOL(blk_mq_init_commands);
+
+static void blk_mq_free_hw_commands(struct blk_mq_hw_ctx *hctx,
+void (*free)(void *, struct blk_mq_hw_ctx *,
 struct request *, unsigned int),
 void *data)
 {
@@ -1004,12 +1042,12 @@ static void blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
 for (i = 0; i < hctx->queue_depth; i++) {
 struct request *rq = hctx->rqs[i];

-init(data, hctx, rq, i);
+free(data, hctx, rq, i);
 }
 }

-void blk_mq_init_commands(struct request_queue *q,
-void (*init)(void *, struct blk_mq_hw_ctx *,
+void blk_mq_free_commands(struct request_queue *q,
+void (*free)(void *, struct blk_mq_hw_ctx *,
 struct request *, unsigned int),
 void *data)
 {
@@ -1017,9 +1055,9 @@ void blk_mq_init_commands(struct request_queue *q,
 unsigned int i;

 queue_for_each_hw_ctx(q, hctx, i)
-blk_mq_init_hw_commands(hctx, init, data);
+blk_mq_free_hw_commands(hctx, free, data);
 }
-EXPORT_SYMBOL(blk_mq_init_commands);
+EXPORT_SYMBOL(blk_mq_free_commands);

 static void blk_mq_free_rq_map(struct blk_mq_hw_ctx *hctx)
 {
@@ -1430,6 +1468,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 return NOTIFY_OK;
 }

+void blk_mq_disable_hotplug(void)
+{
+mutex_lock(&all_q_mutex);
+}
+
+void blk_mq_enable_hotplug(void)
+{
+mutex_unlock(&all_q_mutex);
+}
+
 static int __init blk_mq_init(void)
 {
 blk_mq_cpu_init();
@@ -39,6 +39,8 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
+void blk_mq_enable_hotplug(void);
+void blk_mq_disable_hotplug(void);

 /*
 * CPU -> queue mappings
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
 while (!list_empty(&local_list)) {
 struct request *rq;

-rq = list_entry(local_list.next, struct request, csd.list);
-list_del_init(&rq->csd.list);
+rq = list_entry(local_list.next, struct request, queuelist);
+list_del_init(&rq->queuelist);
 rq->q->softirq_done_fn(rq);
 }
 }
@@ -45,9 +45,14 @@ static void trigger_softirq(void *data)

 local_irq_save(flags);
 list = this_cpu_ptr(&blk_cpu_done);
-list_add_tail(&rq->csd.list, list);
+/*
+ * We reuse queuelist for a list of requests to process. Since the
+ * queuelist is used by the block layer only for requests waiting to be
+ * submitted to the device it is unused now.
+ */
+list_add_tail(&rq->queuelist, list);

-if (list->next == &rq->csd.list)
+if (list->next == &rq->queuelist)
 raise_softirq_irqoff(BLOCK_SOFTIRQ);

 local_irq_restore(flags);
@@ -65,7 +70,7 @@ static int raise_blk_irq(int cpu, struct request *rq)
 data->info = rq;
 data->flags = 0;

-__smp_call_function_single(cpu, data, 0);
+smp_call_function_single_async(cpu, data);
 return 0;
 }

@@ -136,7 +141,7 @@ void __blk_complete_request(struct request *req)
 struct list_head *list;
 do_local:
 list = this_cpu_ptr(&blk_cpu_done);
-list_add_tail(&req->csd.list, list);
+list_add_tail(&req->queuelist, list);

 /*
 * if the list only contains our just added request,
@@ -144,7 +149,7 @@ do_local:
 * entries there, someone already raised the irq but it
 * hasn't run yet.
 */
-if (list->next == &req->csd.list)
+if (list->next == &req->queuelist)
 raise_softirq_irqoff(BLOCK_SOFTIRQ);
 } else if (raise_blk_irq(ccpu, req))
 goto do_local;
@@ -2367,10 +2367,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 * reposition in fifo if next is older than rq
 */
 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
+time_before(next->fifo_time, rq->fifo_time) &&
 cfqq == RQ_CFQQ(next)) {
 list_move(&rq->queuelist, &next->queuelist);
-rq_set_fifo_time(rq, rq_fifo_time(next));
+rq->fifo_time = next->fifo_time;
 }

 if (cfqq->next_rq == next)
@@ -2814,7 +2814,7 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 return NULL;

 rq = rq_entry_fifo(cfqq->fifo.next);
-if (time_before(jiffies, rq_fifo_time(rq)))
+if (time_before(jiffies, rq->fifo_time))
 rq = NULL;

 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
@@ -3927,7 +3927,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 cfq_log_cfqq(cfqd, cfqq, "insert_request");
 cfq_init_prio_data(cfqq, RQ_CIC(rq));

-rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+rq->fifo_time = jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
 list_add_tail(&rq->queuelist, &cfqq->fifo);
 cfq_add_rq_rb(rq);
 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
@@ -106,7 +106,7 @@ deadline_add_request(struct request_queue *q, struct request *rq)
 /*
 * set expire time and add to fifo list
 */
-rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
+rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
 list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
 }

@@ -174,9 +174,9 @@ deadline_merged_requests(struct request_queue *q, struct request *req,
 * and move into next position (next will be deleted) in fifo
 */
 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
-if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
+if (time_before(next->fifo_time, req->fifo_time)) {
 list_move(&req->queuelist, &next->queuelist);
-rq_set_fifo_time(req, rq_fifo_time(next));
+req->fifo_time = next->fifo_time;
 }
 }

@@ -230,7 +230,7 @@ static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
 /*
 * rq is expired!
 */
-if (time_after_eq(jiffies, rq_fifo_time(rq)))
+if (time_after_eq(jiffies, rq->fifo_time))
 return 1;

 return 0;
@@ -11,6 +11,8 @@
 * by Guenther Kelleter (guenther@pool.informatik.rwth-aachen.de)
 */

+#include <linux/compiler.h>
+
 struct partition_info
 {
 u8 flg; /* bit 0: active; bit 7: bootable */
@@ -29,6 +31,6 @@ struct rootsector
 u32 bsl_st; /* start of bad sector list */
 u32 bsl_cnt; /* length of bad sector list */
 u16 checksum; /* checksum for bootable disks */
-} __attribute__((__packed__));
+} __packed;

 int atari_partition(struct parsed_partitions *state);
@@ -32,6 +32,7 @@
 #include <linux/major.h>
 #include <linux/string.h>
 #include <linux/efi.h>
+#include <linux/compiler.h>

 #define MSDOS_MBR_SIGNATURE 0xaa55
 #define EFI_PMBR_OSTYPE_EFI 0xEF
@@ -87,13 +88,13 @@ typedef struct _gpt_header {
 *
 * uint8_t reserved2[ BlockSize - 92 ];
 */
-} __attribute__ ((packed)) gpt_header;
+} __packed gpt_header;

 typedef struct _gpt_entry_attributes {
 u64 required_to_function:1;
 u64 reserved:47;
 u64 type_guid_specific:16;
-} __attribute__ ((packed)) gpt_entry_attributes;
+} __packed gpt_entry_attributes;

 typedef struct _gpt_entry {
 efi_guid_t partition_type_guid;
@@ -102,7 +103,7 @@ typedef struct _gpt_entry {
 __le64 ending_lba;
 gpt_entry_attributes attributes;
 efi_char16_t partition_name[72 / sizeof (efi_char16_t)];
-} __attribute__ ((packed)) gpt_entry;
+} __packed gpt_entry;

 typedef struct _gpt_mbr_record {
 u8 boot_indicator; /* unused by EFI, set to 0x80 for bootable */
@@ -124,7 +125,7 @@ typedef struct _legacy_mbr {
 __le16 unknown;
 gpt_mbr_record partition_record[4];
 __le16 signature;
-} __attribute__ ((packed)) legacy_mbr;
+} __packed legacy_mbr;

 /* Functions */
 extern int efi_partition(struct parsed_partitions *state);
@@ -8,6 +8,7 @@

 #include "check.h"
 #include "karma.h"
+#include <linux/compiler.h>

 int karma_partition(struct parsed_partitions *state)
 {
@@ -26,7 +27,7 @@ int karma_partition(struct parsed_partitions *state)
 } d_partitions[2];
 u8 d_blank[208];
 __le16 d_magic;
-} __attribute__((packed)) *label;
+} __packed *label;
 struct d_partition *p;

 data = read_part_sector(state, 0, &sect);
@@ -490,13 +490,14 @@ static struct blk_mq_reg virtio_mq_reg = {
 .flags = BLK_MQ_F_SHOULD_MERGE,
 };

-static void virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
+static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx,
 struct request *rq, unsigned int nr)
 {
 struct virtio_blk *vblk = data;
 struct virtblk_req *vbr = rq->special;

 sg_init_table(vbr->sg, vblk->sg_elems);
+return 0;
 }

 static int virtblk_probe(struct virtio_device *vdev)
@@ -323,7 +323,7 @@ static void cpuidle_coupled_poke(int cpu)
 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

 if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
-__smp_call_function_single(cpu, csd, 0);
+smp_call_function_single_async(cpu, csd);
 }

 /**
@@ -898,7 +898,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
 struct be_queue_info *cq;
 unsigned int num_eq_processed;
 struct be_eq_obj *pbe_eq;
-unsigned long flags;

 pbe_eq = dev_id;
 eq = &pbe_eq->q;
@@ -907,31 +906,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)

 phba = pbe_eq->phba;
 num_eq_processed = 0;
-if (blk_iopoll_enabled) {
 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 & EQE_VALID_MASK) {
 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
 blk_iopoll_sched(&pbe_eq->iopoll);

 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 queue_tail_inc(eq);
 eqe = queue_tail_node(eq);
 num_eq_processed++;
 }
-} else {
-while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-& EQE_VALID_MASK) {
-spin_lock_irqsave(&phba->isr_lock, flags);
-pbe_eq->todo_cq = true;
-spin_unlock_irqrestore(&phba->isr_lock, flags);
-AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-queue_tail_inc(eq);
-eqe = queue_tail_node(eq);
-num_eq_processed++;
-}
-
-if (pbe_eq->todo_cq)
-queue_work(phba->wq, &pbe_eq->work_cqs);
-}

 if (num_eq_processed)
@@ -952,7 +935,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)
 struct hwi_context_memory *phwi_context;
 struct be_eq_entry *eqe = NULL;
 struct be_queue_info *eq;
-struct be_queue_info *cq;
 struct be_queue_info *mcc;
 unsigned long flags, index;
 unsigned int num_mcceq_processed, num_ioeq_processed;
@@ -978,72 +960,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)

 num_ioeq_processed = 0;
 num_mcceq_processed = 0;
-if (blk_iopoll_enabled) {
 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
 & EQE_VALID_MASK) {
 if (((eqe->dw[offsetof(struct amap_eq_entry,
 resource_id) / 32] &
 EQE_RESID_MASK) >> 16) == mcc->id) {
 spin_lock_irqsave(&phba->isr_lock, flags);
 pbe_eq->todo_mcc_cq = true;
 spin_unlock_irqrestore(&phba->isr_lock, flags);
 num_mcceq_processed++;
 } else {
 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
 blk_iopoll_sched(&pbe_eq->iopoll);
 num_ioeq_processed++;
 }
 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
 queue_tail_inc(eq);
 eqe = queue_tail_node(eq);
 }
 if (num_ioeq_processed || num_mcceq_processed) {
 if (pbe_eq->todo_mcc_cq)
 queue_work(phba->wq, &pbe_eq->work_cqs);

 if ((num_mcceq_processed) && (!num_ioeq_processed))
 hwi_ring_eq_db(phba, eq->id, 0,
 (num_ioeq_processed +
 num_mcceq_processed) , 1, 1);
 else
 hwi_ring_eq_db(phba, eq->id, 0,
 (num_ioeq_processed +
 num_mcceq_processed), 0, 1);

 return IRQ_HANDLED;
 } else
 return IRQ_NONE;
-} else {
-cq = &phwi_context->be_cq[0];
-while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
-& EQE_VALID_MASK) {
-
-if (((eqe->dw[offsetof(struct amap_eq_entry,
-resource_id) / 32] &
-EQE_RESID_MASK) >> 16) != cq->id) {
-spin_lock_irqsave(&phba->isr_lock, flags);
-pbe_eq->todo_mcc_cq = true;
-spin_unlock_irqrestore(&phba->isr_lock, flags);
-} else {
-spin_lock_irqsave(&phba->isr_lock, flags);
-pbe_eq->todo_cq = true;
-spin_unlock_irqrestore(&phba->isr_lock, flags);
-}
-AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
-queue_tail_inc(eq);
-eqe = queue_tail_node(eq);
-num_ioeq_processed++;
-}
-if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
-queue_work(phba->wq, &pbe_eq->work_cqs);
-
-if (num_ioeq_processed) {
-hwi_ring_eq_db(phba, eq->id, 0,
-num_ioeq_processed, 1, 1);
-return IRQ_HANDLED;
-} else
-return IRQ_NONE;
-}
 }

 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
@@ -5263,11 +5213,10 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
 }
 pci_disable_msix(phba->pcidev);

-if (blk_iopoll_enabled)
 for (i = 0; i < phba->num_cpus; i++) {
 pbe_eq = &phwi_context->be_eq[i];
 blk_iopoll_disable(&pbe_eq->iopoll);
 }

 if (unload_state == BEISCSI_CLEAN_UNLOAD) {
 destroy_workqueue(phba->wq);
@@ -5478,32 +5427,18 @@ static void beiscsi_eeh_resume(struct pci_dev *pdev)
 phwi_ctrlr = phba->phwi_ctrlr;
 phwi_context = phwi_ctrlr->phwi_ctxt;

-if (blk_iopoll_enabled) {
 for (i = 0; i < phba->num_cpus; i++) {
 pbe_eq = &phwi_context->be_eq[i];
 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
 be_iopoll);
 blk_iopoll_enable(&pbe_eq->iopoll);
 }

 i = (phba->msix_enabled) ? i : 0;
 /* Work item for MCC handling */
 pbe_eq = &phwi_context->be_eq[i];
 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-} else {
-if (phba->msix_enabled) {
-for (i = 0; i <= phba->num_cpus; i++) {
-pbe_eq = &phwi_context->be_eq[i];
-INIT_WORK(&pbe_eq->work_cqs,
-beiscsi_process_all_cqs);
-}
-} else {
-pbe_eq = &phwi_context->be_eq[0];
-INIT_WORK(&pbe_eq->work_cqs,
-beiscsi_process_all_cqs);
-}
-}

 ret = beiscsi_init_irqs(phba);
 if (ret < 0) {
 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5665,32 +5600,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
 phwi_ctrlr = phba->phwi_ctrlr;
 phwi_context = phwi_ctrlr->phwi_ctxt;

-if (blk_iopoll_enabled) {
 for (i = 0; i < phba->num_cpus; i++) {
 pbe_eq = &phwi_context->be_eq[i];
 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
 be_iopoll);
 blk_iopoll_enable(&pbe_eq->iopoll);
 }

 i = (phba->msix_enabled) ? i : 0;
 /* Work item for MCC handling */
 pbe_eq = &phwi_context->be_eq[i];
 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
-} else {
-if (phba->msix_enabled) {
-for (i = 0; i <= phba->num_cpus; i++) {
-pbe_eq = &phwi_context->be_eq[i];
-INIT_WORK(&pbe_eq->work_cqs,
-beiscsi_process_all_cqs);
-}
-} else {
-pbe_eq = &phwi_context->be_eq[0];
-INIT_WORK(&pbe_eq->work_cqs,
-beiscsi_process_all_cqs);
-}
-}

 ret = beiscsi_init_irqs(phba);
 if (ret < 0) {
 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5719,11 +5640,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,

 free_blkenbld:
 destroy_workqueue(phba->wq);
-if (blk_iopoll_enabled)
 for (i = 0; i < phba->num_cpus; i++) {
 pbe_eq = &phwi_context->be_eq[i];
 blk_iopoll_disable(&pbe_eq->iopoll);
 }

 free_twq:
 beiscsi_clean_port(phba);
 beiscsi_free_mem(phba);
@@ -3670,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,
 return strlen(buf);
 }

-if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 for (i = 1; i < ioa_cfg->hrrq_num; i++)
 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
 }

 spin_lock_irqsave(shost->host_lock, lock_flags);
 ioa_cfg->iopoll_weight = user_iopoll_weight;
-if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -5525,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 return IRQ_NONE;
 }

-if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 hrrq->toggle_bit) {
 if (!blk_iopoll_sched_prep(&hrrq->iopoll))
@@ -9975,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 ioa_cfg->host->max_channel = IPR_VSET_BUS;
 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

-if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
 blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
 ioa_cfg->iopoll_weight, ipr_iopoll);
@@ -10005,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
 int i;

 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
-ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
 ioa_cfg->iopoll_weight = 0;
 for (i = 1; i < ioa_cfg->hrrq_num; i++)
 blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
@@ -301,25 +301,25 @@ int bio_integrity_get_tag(struct bio *bio, void *tag_buf, unsigned int len)
 EXPORT_SYMBOL(bio_integrity_get_tag);

 /**
- * bio_integrity_generate - Generate integrity metadata for a bio
- * @bio: bio to generate integrity metadata for
- *
- * Description: Generates integrity metadata for a bio by calling the
- * block device's generation callback function. The bio must have a
- * bip attached with enough room to accommodate the generated
- * integrity metadata.
+ * bio_integrity_generate_verify - Generate/verify integrity metadata for a bio
+ * @bio: bio to generate/verify integrity metadata for
+ * @operate: operate number, 1 for generate, 0 for verify
 */
-static void bio_integrity_generate(struct bio *bio)
+static int bio_integrity_generate_verify(struct bio *bio, int operate)
 {
 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
 struct blk_integrity_exchg bix;
 struct bio_vec bv;
 struct bvec_iter iter;
-sector_t sector = bio->bi_iter.bi_sector;
-unsigned int sectors, total;
+sector_t sector;
+unsigned int sectors, ret = 0;
 void *prot_buf = bio->bi_integrity->bip_buf;

-total = 0;
+if (operate)
+sector = bio->bi_iter.bi_sector;
+else
+sector = bio->bi_integrity->bip_iter.bi_sector;
+
 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
 bix.sector_size = bi->sector_size;

@@ -330,16 +330,37 @@ static void bio_integrity_generate(struct bio *bio)
 bix.prot_buf = prot_buf;
 bix.sector = sector;

-bi->generate_fn(&bix);
+if (operate) {
+bi->generate_fn(&bix);
+} else {
+ret = bi->verify_fn(&bix);
+if (ret) {
+kunmap_atomic(kaddr);
+return ret;
+}
+}

 sectors = bv.bv_len / bi->sector_size;
 sector += sectors;
 prot_buf += sectors * bi->tuple_size;
-total += sectors * bi->tuple_size;
-BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);

 kunmap_atomic(kaddr);
 }
+return ret;
+}
+
+/**
+ * bio_integrity_generate - Generate integrity metadata for a bio
+ * @bio: bio to generate integrity metadata for
+ *
+ * Description: Generates integrity metadata for a bio by calling the
+ * block device's generation callback function. The bio must have a
+ * bip attached with enough room to accommodate the generated
+ * integrity metadata.
+ */
+static void bio_integrity_generate(struct bio *bio)
+{
+bio_integrity_generate_verify(bio, 1);
 }

 static inline unsigned short blk_integrity_tuple_size(struct blk_integrity *bi)
@@ -454,40 +475,7 @@ EXPORT_SYMBOL(bio_integrity_prep);
 */
 static int bio_integrity_verify(struct bio *bio)
 {
-struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
-struct blk_integrity_exchg bix;
-struct bio_vec *bv;
-sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
-unsigned int sectors, ret = 0;
-void *prot_buf = bio->bi_integrity->bip_buf;
-int i;
-
-bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
-bix.sector_size = bi->sector_size;
-
-bio_for_each_segment_all(bv, bio, i) {
-void *kaddr = kmap_atomic(bv->bv_page);
-
-bix.data_buf = kaddr + bv->bv_offset;
-bix.data_size = bv->bv_len;
-bix.prot_buf = prot_buf;
-bix.sector = sector;
-
-ret = bi->verify_fn(&bix);
-
-if (ret) {
-kunmap_atomic(kaddr);
-return ret;
-}
-
-sectors = bv->bv_len / bi->sector_size;
-sector += sectors;
-prot_buf += sectors * bi->tuple_size;
-
-kunmap_atomic(kaddr);
-}
-
-return ret;
+return bio_integrity_generate_verify(bio, 0);
 }

 /**
fs/bio.c
@@ -116,7 +116,6 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 if (!slab)
 goto out_unlock;

-printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
 bslab->slab = slab;
 bslab->slab_ref = 1;
 bslab->slab_size = sz;
@@ -43,6 +43,4 @@ extern void __blk_iopoll_complete(struct blk_iopoll *);
 extern void blk_iopoll_enable(struct blk_iopoll *);
 extern void blk_iopoll_disable(struct blk_iopoll *);

-extern int blk_iopoll_enabled;
-
 #endif
@@ -109,7 +109,7 @@ enum {
 BLK_MQ_F_SHOULD_SORT = 1 << 1,
 BLK_MQ_F_SHOULD_IPI = 1 << 2,

-BLK_MQ_S_STOPPED = 1 << 0,
+BLK_MQ_S_STOPPED = 0,

 BLK_MQ_MAX_DEPTH = 2048,
 };
@@ -117,7 +117,8 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
-void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);

 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

@@ -99,6 +99,7 @@ struct request {
 union {
 struct call_single_data csd;
 struct work_struct mq_flush_work;
+unsigned long fifo_time;
 };

 struct request_queue *q;
|
@ -202,17 +202,8 @@ enum {
|
|||||||
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
|
#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
|
||||||
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
|
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
|
||||||
|
|
||||||
/*
|
|
||||||
* Hack to reuse the csd.list list_head as the fifo time holder while
|
|
||||||
* the request is in the io scheduler. Saves an unsigned long in rq.
|
|
||||||
*/
|
|
||||||
#define rq_fifo_time(rq) ((unsigned long) (rq)->csd.list.next)
|
|
||||||
#define rq_set_fifo_time(rq,exp) ((rq)->csd.list.next = (void *) (exp))
|
|
||||||
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
|
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
|
||||||
#define rq_fifo_clear(rq) do { \
|
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
|
||||||
list_del_init(&(rq)->queuelist); \
|
|
||||||
INIT_LIST_HEAD(&(rq)->csd.list); \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#else /* CONFIG_BLOCK */
|
#else /* CONFIG_BLOCK */
|
||||||
|
|
||||||
|
@ -17,10 +17,7 @@ extern void cpu_idle(void);
|
|||||||
|
|
||||||
typedef void (*smp_call_func_t)(void *info);
|
typedef void (*smp_call_func_t)(void *info);
|
||||||
struct call_single_data {
|
struct call_single_data {
|
||||||
union {
|
struct llist_node llist;
|
||||||
struct list_head list;
|
|
||||||
struct llist_node llist;
|
|
||||||
};
|
|
||||||
smp_call_func_t func;
|
smp_call_func_t func;
|
||||||
void *info;
|
void *info;
|
||||||
u16 flags;
|
u16 flags;
|
||||||
@ -53,8 +50,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
|
|||||||
smp_call_func_t func, void *info, bool wait,
|
smp_call_func_t func, void *info, bool wait,
|
||||||
gfp_t gfp_flags);
|
gfp_t gfp_flags);
|
||||||
|
|
||||||
void __smp_call_function_single(int cpuid, struct call_single_data *data,
|
int smp_call_function_single_async(int cpu, struct call_single_data *csd);
|
||||||
int wait);
|
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
|
|
||||||
|
@@ -132,6 +132,7 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operations request
+ * @nr_bytes: number of completed bytes
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of operation request has been completed by the device driver. If
@@ -139,11 +140,37 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
-DEFINE_EVENT(block_rq_with_error, block_rq_complete,
+TRACE_EVENT(block_rq_complete,

-TP_PROTO(struct request_queue *q, struct request *rq),
+TP_PROTO(struct request_queue *q, struct request *rq,
+unsigned int nr_bytes),

-TP_ARGS(q, rq)
+TP_ARGS(q, rq, nr_bytes),
+
+TP_STRUCT__entry(
+__field( dev_t, dev )
+__field( sector_t, sector )
+__field( unsigned int, nr_sector )
+__field( int, errors )
+__array( char, rwbs, RWBS_LEN )
+__dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+),
+
+TP_fast_assign(
+__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+__entry->sector = blk_rq_pos(rq);
+__entry->nr_sector = nr_bytes >> 9;
+__entry->errors = rq->errors;
+
+blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
+blk_dump_cmd(__get_str(cmd), rq);
+),
+
+TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+MAJOR(__entry->dev), MINOR(__entry->dev),
+__entry->rwbs, __get_str(cmd),
+(unsigned long long)__entry->sector,
+__entry->nr_sector, __entry->errors)
 );

 DECLARE_EVENT_CLASS(block_rq,
@@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 if (rq == this_rq()) {
 __hrtick_restart(rq);
 } else if (!rq->hrtick_csd_pending) {
-__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
+smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 rq->hrtick_csd_pending = 1;
 }
 }
kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
 csd->flags &= ~CSD_FLAG_LOCK;
 }

+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+smp_call_func_t func, void *info, int wait)
 {
-unsigned long flags;
+struct call_single_data csd_stack = { .flags = 0 };
+unsigned long flags;
+
+if (cpu == smp_processor_id()) {
+local_irq_save(flags);
+func(info);
+local_irq_restore(flags);
+return 0;
+}
+
+if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+return -ENXIO;
+
+if (!csd) {
+csd = &csd_stack;
+if (!wait)
+csd = &__get_cpu_var(csd_data);
+}
+
+csd_lock(csd);
+
+csd->func = func;
+csd->info = info;

 if (wait)
 csd->flags |= CSD_FLAG_WAIT;
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)

 if (wait)
 csd_lock_wait(csd);
+
+return 0;
 }

 /*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 */
 void generic_smp_call_function_single_interrupt(void)
 {
-struct llist_node *entry, *next;
+struct llist_node *entry;
+struct call_single_data *csd, *csd_next;

 /*
 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
 entry = llist_del_all(&__get_cpu_var(call_single_queue));
 entry = llist_reverse_order(entry);

-while (entry) {
-struct call_single_data *csd;
-
-next = entry->next;
-
-csd = llist_entry(entry, struct call_single_data, llist);
+llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 csd->func(csd->info);
 csd_unlock(csd);
-
-entry = next;
 }
 }

-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 int wait)
 {
-struct call_single_data d = {
-.flags = 0,
-};
-unsigned long flags;
 int this_cpu;
-int err = 0;
+int err;

 /*
 * prevent preemption and reschedule on another processor,
@@ -209,26 +229,7 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 && !oops_in_progress);

-if (cpu == this_cpu) {
-local_irq_save(flags);
-func(info);
-local_irq_restore(flags);
-} else {
-if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-struct call_single_data *csd = &d;
-
-if (!wait)
-csd = &__get_cpu_var(csd_data);
-
-csd_lock(csd);
-
-csd->func = func;
-csd->info = info;
-generic_exec_single(cpu, csd, wait);
-} else {
-err = -ENXIO; /* CPU not online */
-}
-}
+err = generic_exec_single(cpu, NULL, func, info, wait);

 put_cpu();

@@ -236,6 +237,34 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);

+/**
+ * smp_call_function_single_async(): Run an asynchronous function on a
+ * specific CPU.
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but the call is asynchonous and
+ * can thus be done from contexts with disabled interrupts.
+ *
+ * The caller passes his own pre-allocated data structure
+ * (ie: embedded in an object) and is responsible for synchronizing it
+ * such that the IPIs performed on the @csd are strictly serialized.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging facility to
+ * validate the correctness of this serialization.
+ */
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+{
+int err = 0;
+
+preempt_disable();
+err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+preempt_enable();
+
+return err;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_single_async);
+
 /*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
@@ -279,44 +308,6 @@ call:
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);

-/**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-int wait)
-{
-unsigned int this_cpu;
-unsigned long flags;
-
-this_cpu = get_cpu();
-/*
- * Can deadlock when called with interrupts disabled.
- * We allow cpu's that are not yet online though, as no one else can
- * send smp call function interrupt to this cpu and as such deadlocks
|
|
||||||
* can't happen.
|
|
||||||
*/
|
|
||||||
WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
|
|
||||||
&& !oops_in_progress);
|
|
||||||
|
|
||||||
if (cpu == this_cpu) {
|
|
||||||
local_irq_save(flags);
|
|
||||||
csd->func(csd->info);
|
|
||||||
local_irq_restore(flags);
|
|
||||||
} else {
|
|
||||||
csd_lock(csd);
|
|
||||||
generic_exec_single(cpu, csd, wait);
|
|
||||||
}
|
|
||||||
put_cpu();
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(__smp_call_function_single);
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* smp_call_function_many(): Run a function on a set of other CPUs.
|
* smp_call_function_many(): Run a function on a set of other CPUs.
|
||||||
* @mask: The set of cpus to run on (only runs on online subset).
|
* @mask: The set of cpus to run on (only runs on online subset).
|
||||||
|
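For readers of this merge, a minimal usage sketch of the two kernel/smp.c interfaces shown above, smp_call_function_single() and the new smp_call_function_single_async(); it is not part of the diff. The embedding object and helpers (struct my_dev, my_dev_bump, my_dev_kick) are hypothetical names used only for illustration; the call signatures, the -ENXIO return for an offline CPU, and the rule that the caller owns and serializes the embedded call_single_data are taken from the hunks above.

#include <linux/smp.h>

/* Hypothetical object that embeds a pre-allocated call_single_data. */
struct my_dev {
        struct call_single_data csd;    /* must not be reused while an IPI is in flight */
        int counter;
};

/* Runs on the target CPU; from the IPI handler in the asynchronous case. */
static void my_dev_bump(void *info)
{
        struct my_dev *dev = info;

        dev->counter++;
}

static int my_dev_kick(struct my_dev *dev, int cpu)
{
        int err;

        /*
         * Synchronous form: spins until my_dev_bump() has run on @cpu,
         * so it may not be called with interrupts disabled.
         */
        err = smp_call_function_single(cpu, my_dev_bump, dev, 1);
        if (err)
                return err;     /* e.g. -ENXIO when @cpu is offline */

        /*
         * Asynchronous form: legal from irq-disabled context.  The caller
         * fills in csd->func/csd->info and must guarantee that the previous
         * IPI on this csd has completed before the csd is reused.
         */
        dev->csd.func = my_dev_bump;
        dev->csd.info = dev;
        return smp_call_function_single_async(cpu, &dev->csd);
}

The kernel/watchdog.c hunk further down drops its on-stack csd and switches to the plain synchronous call, while net/core/dev.c keeps its embedded csd and is converted to the asynchronous form.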
kernel/sysctl.c

@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
 #ifndef CONFIG_MMU
 extern int sysctl_nr_trim_pages;
 #endif
-#ifdef CONFIG_BLOCK
-extern int blk_iopoll_enabled;
-#endif
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -1086,15 +1083,6 @@ static struct ctl_table kern_table[] = {
                 .mode           = 0644,
                 .proc_handler   = proc_dointvec,
         },
-#endif
-#ifdef CONFIG_BLOCK
-        {
-                .procname       = "blk_iopoll",
-                .data           = &blk_iopoll_enabled,
-                .maxlen         = sizeof(int),
-                .mode           = 0644,
-                .proc_handler   = proc_dointvec,
-        },
 #endif
         { }
 };
kernel/trace/blktrace.c

@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
  * blk_add_trace_rq - Add a trace for a request oriented action
  * @q:         queue the io is for
  * @rq:        the source request
+ * @nr_bytes:  number of completed bytes
  * @what:      the action
  *
  * Description:
@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
  *
  **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-                             u32 what)
+                             unsigned int nr_bytes, u32 what)
 {
         struct blk_trace *bt = q->blk_trace;
 
@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
         if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                 what |= BLK_TC_ACT(BLK_TC_PC);
-                __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+                __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
                                 what, rq->errors, rq->cmd_len, rq->cmd);
         } else {
                 what |= BLK_TC_ACT(BLK_TC_FS);
-                __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+                __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
                                 rq->cmd_flags, what, rq->errors, 0, NULL);
         }
 }
@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 static void blk_add_trace_rq_abort(void *ignore,
                                    struct request_queue *q, struct request *rq)
 {
-        blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+        blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
                                     struct request_queue *q, struct request *rq)
 {
-        blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+        blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
 }
 
 static void blk_add_trace_rq_issue(void *ignore,
                                    struct request_queue *q, struct request *rq)
 {
-        blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+        blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
 }
 
 static void blk_add_trace_rq_requeue(void *ignore,
                                      struct request_queue *q,
                                      struct request *rq)
 {
-        blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+        blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
 }
 
 static void blk_add_trace_rq_complete(void *ignore,
                                       struct request_queue *q,
-                                      struct request *rq)
+                                      struct request *rq,
+                                      unsigned int nr_bytes)
 {
-        blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+        blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
 }
 
 /**
kernel/up.c

@@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-                                int wait)
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
 {
         unsigned long flags;
 
         local_irq_save(flags);
         csd->func(csd->info);
         local_irq_restore(flags);
+        return 0;
 }
-EXPORT_SYMBOL(__smp_call_function_single);
+EXPORT_SYMBOL(smp_call_function_single_async);
 
 int on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
kernel/watchdog.c

@@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info)
 
 static void update_timers(int cpu)
 {
-        struct call_single_data data = {.func = restart_watchdog_hrtimer};
         /*
          * Make sure that perf event counter will adopt to a new
          * sampling period. Updating the sampling period directly would
@@ -515,7 +514,7 @@ static void update_timers(int cpu)
          * might be late already so we have to restart the timer as well.
          */
         watchdog_nmi_disable(cpu);
-        __smp_call_function_single(cpu, &data, 1);
+        smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
         watchdog_nmi_enable(cpu);
 }
 
net/core/dev.c

@@ -4135,8 +4135,8 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
                         struct softnet_data *next = remsd->rps_ipi_next;
 
                         if (cpu_online(remsd->cpu))
-                                __smp_call_function_single(remsd->cpu,
-                                                           &remsd->csd, 0);
+                                smp_call_function_single_async(remsd->cpu,
+                                                           &remsd->csd);
                         remsd = next;
                 }
         } else