genirq: Introduce irq_do_set_affinity() to reduce duplicated code
All invocations of chip->irq_set_affinity() are doing the same return
value checks. Let them all use a common function.

[ tglx: removed the silly likely while at it ]

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Keping Chen <chenkeping@huawei.com>
Link: http://lkml.kernel.org/r/1333120296-13563-3-git-send-email-jiang.liu@huawei.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 23812b9d9e
commit 818b0f3bfb
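For context, here is a minimal sketch (not part of this commit) of a hypothetical irqchip .irq_set_affinity callback, illustrating the two success codes that the new irq_do_set_affinity() helper interprets in one place: IRQ_SET_MASK_OK asks the core to copy the mask into data->affinity, while IRQ_SET_MASK_OK_NOCOPY signals that the callback already updated it. The foo_* names and the hardware routing hook are assumptions made purely for illustration.

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Hypothetical hardware routing hook, assumed for illustration only. */
static void foo_hw_route_irq(unsigned int irq, unsigned int cpu);

static int foo_irq_set_affinity(struct irq_data *data,
				const struct cpumask *mask, bool force)
{
	/* Route the interrupt to the first requested CPU. */
	unsigned int cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	foo_hw_route_irq(data->irq, cpu);

	/*
	 * IRQ_SET_MASK_OK: the core copies 'mask' into data->affinity and
	 * updates the thread affinity (now done in irq_do_set_affinity()).
	 * Return IRQ_SET_MASK_OK_NOCOPY instead if the callback has already
	 * written data->affinity itself.
	 */
	return IRQ_SET_MASK_OK;
}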
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
+extern int irq_do_set_affinity(struct irq_data *data,
+			       const struct cpumask *dest, bool force);
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
@@ -139,6 +139,25 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+			bool force)
+{
+	struct irq_desc *desc = irq_data_to_desc(data);
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	int ret;
+
+	ret = chip->irq_set_affinity(data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(data->affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+		ret = 0;
+	}
+
+	return ret;
+}
+
 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +168,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = chip->irq_set_affinity(data, mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(data->affinity, mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-			ret = 0;
-		}
+		ret = irq_do_set_affinity(data, mask, false);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -280,9 +292,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 static int
 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
-	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct cpumask *set = irq_default_affinity;
-	int ret, node = desc->irq_data.node;
+	int node = desc->irq_data.node;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
@@ -308,13 +319,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 		if (cpumask_intersects(mask, nodemask))
 			cpumask_and(mask, mask, nodemask);
 	}
-	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
-	switch (ret) {
-	case IRQ_SET_MASK_OK:
-		cpumask_copy(desc->irq_data.affinity, mask);
-	case IRQ_SET_MASK_OK_NOCOPY:
-		irq_set_thread_affinity(desc);
-	}
+	irq_do_set_affinity(&desc->irq_data, mask, false);
 	return 0;
 }
 #else
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		int ret = chip->irq_set_affinity(&desc->irq_data,
-						 desc->pending_mask, false);
-		switch (ret) {
-		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
-		case IRQ_SET_MASK_OK_NOCOPY:
-			irq_set_thread_affinity(desc);
-		}
-	}
+	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
+		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
 
 	cpumask_clear(desc->pending_mask);
 }