Mirror of https://github.com/torvalds/linux.git (synced 2024-11-13 23:51:39 +00:00)
genirq: Remove mask argument from setup_affinity()
No point in having this alloc/free dance of cpumasks. Provide a static
mask for setup_affinity() and protect it properly.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235444.851571573@linutronix.de
parent 8e7b632237
commit cba4235e60
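The change below replaces a per-invocation alloc_cpumask_var()/free_cpumask_var() pair with one function-local static cpumask that is only valid while a dedicated raw spinlock is held. The same pattern is easy to demonstrate outside the kernel; here is a minimal userspace C sketch (the names scratch_lock, scratch and pick_target() are illustrative, not taken from the patch):

#include <pthread.h>
#include <stdio.h>

/*
 * Instead of a malloc()/free() pair for a scratch buffer on every call,
 * keep one static scratch object and serialize access to it. The scratch
 * data is only needed for the duration of the call, so the lock scope
 * covers both producing and consuming it.
 */
static long pick_target(unsigned long online, unsigned long preferred)
{
	static pthread_mutex_t scratch_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned long scratch;	/* shared scratch, guarded by scratch_lock */
	long ret;

	pthread_mutex_lock(&scratch_lock);
	scratch = online & preferred;		/* fill the scratch mask */
	ret = scratch ? (long)scratch : -1;	/* consume it before unlocking */
	pthread_mutex_unlock(&scratch_lock);
	return ret;
}

int main(void)
{
	printf("%ld\n", pick_target(0xfUL, 0x3UL));	/* prints 3 */
	return 0;
}

Note that the patch takes the lock with raw_spin_lock() rather than raw_spin_lock_irqsave(): both callers of the rewritten function already hold desc->lock with interrupts disabled, as the irq_select_affinity_usr() and __setup_irq() hunks below show.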
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -109,7 +109,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern bool irq_can_set_affinity_usr(unsigned int irq);
 
-extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
+extern int irq_select_affinity_usr(unsigned int irq);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -345,15 +345,18 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static int irq_setup_affinity(struct irq_desc *desc)
 {
 	struct cpumask *set = irq_default_affinity;
-	int node = irq_desc_get_node(desc);
+	int ret, node = irq_desc_get_node(desc);
+	static DEFINE_RAW_SPINLOCK(mask_lock);
+	static struct cpumask mask;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!__irq_can_set_affinity(desc))
 		return 0;
 
+	raw_spin_lock(&mask_lock);
 	/*
 	 * Preserve the managed affinity setting and a userspace affinity
 	 * setup, but make sure that one of the targets is online.
@@ -367,43 +370,42 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 	}
 
-	cpumask_and(mask, cpu_online_mask, set);
+	cpumask_and(&mask, cpu_online_mask, set);
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
 		/* make sure at least one of the cpus in nodemask is online */
-		if (cpumask_intersects(mask, nodemask))
-			cpumask_and(mask, mask, nodemask);
+		if (cpumask_intersects(&mask, nodemask))
+			cpumask_and(&mask, &mask, nodemask);
 	}
-	irq_do_set_affinity(&desc->irq_data, mask, false);
-	return 0;
+	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
+	raw_spin_unlock(&mask_lock);
+	return ret;
 }
 #else
 /* Wrapper for ALPHA specific affinity selector magic */
-static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
+int irq_setup_affinity(struct irq_desc *desc)
 {
-	return irq_select_affinity(irq_desc_get_irq(d));
+	return irq_select_affinity(irq_desc_get_irq(desc));
 }
 #endif
 
 /*
- * Called when affinity is set via /proc/irq
+ * Called when a bogus affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
+int irq_select_affinity_usr(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(desc, mask);
+	ret = irq_setup_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 
 #else
-static inline int
-setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static inline int irq_setup_affinity(struct irq_desc *desc)
 {
 	return 0;
 }
@@ -1128,7 +1130,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
-	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -1187,11 +1188,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 	}
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto out_thread;
-	}
-
 	/*
 	 * Drivers are often written to work w/o knowledge about the
 	 * underlying irq chip implementation, so a request for a
@@ -1256,7 +1252,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		if (thread_mask == ~0UL) {
 			ret = -EBUSY;
-			goto out_mask;
+			goto out_unlock;
 		}
 		/*
 		 * The thread_mask for the action is or'ed to
@@ -1300,7 +1296,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
 		       irq);
 		ret = -EINVAL;
-		goto out_mask;
+		goto out_unlock;
 	}
 
 	if (!shared) {
@@ -1308,7 +1304,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (ret) {
 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
 			       new->name, irq, desc->irq_data.chip->name);
-			goto out_mask;
+			goto out_unlock;
 		}
 
 		init_waitqueue_head(&desc->wait_for_threads);
@@ -1320,7 +1316,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		if (ret) {
 			irq_release_resources(desc);
-			goto out_mask;
+			goto out_unlock;
 		}
 	}
 
@@ -1357,7 +1353,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 
 		/* Set default affinity mask once everything is setup */
-		setup_affinity(desc, mask);
+		irq_setup_affinity(desc);
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1401,8 +1397,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	irq_add_debugfs_entry(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
-	free_cpumask_var(mask);
-
 	return 0;
 
 mismatch:
@@ -1415,9 +1409,8 @@ mismatch:
 	}
 	ret = -EBUSY;
 
-out_mask:
+out_unlock:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	free_cpumask_var(mask);
 
 out_thread:
 	if (new->thread) {
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -120,9 +120,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
 	 * one online CPU still has to be targeted.
 	 */
 	if (!cpumask_intersects(new_value, cpu_online_mask)) {
-		/* Special case for empty set - allow the architecture
-		   code to set default SMP affinity. */
-		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
+		/*
+		 * Special case for empty set - allow the architecture code
+		 * to set default SMP affinity.
+		 */
+		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
 	} else {
 		irq_set_affinity(irq, new_value);
 		err = count;
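For context, the file touched above backs /proc/irq/<N>/smp_affinity: a write to it lands in write_irq_affinity(), and a mask with no online CPUs is the special case that now reaches irq_select_affinity_usr() without a user-supplied cpumask. A usage sketch from userspace (the IRQ number 30 and the mask value "3" are arbitrary examples, and the write requires root):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* "3" is a hex CPU mask selecting CPUs 0-1; the kernel parses it
	 * in write_irq_affinity(). IRQ 30 is just an example number. */
	int fd = open("/proc/irq/30/smp_affinity", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "3", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}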