/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"GICv3: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"
struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	unsigned int		irq_nr;
	struct partition_desc	*ppi_descs[16];
};

static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_kvm_info gic_v3_kvm_info;

#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}
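/*
 * Wait for register writes to the GIC frame at @base to take effect,
 * by polling the RWP (Register Write Pending) bit until it clears.
 * The spin is bounded so a wedged GIC cannot hang the CPU forever.
 */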
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}
/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

static u64 __maybe_unused gic_read_iar(void)
{
	if (static_branch_unlikely(&is_cavium_thunderx))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
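/*
 * Wake up (or put to sleep) this CPU's redistributor via the
 * GICR_WAKER handshake: flip ProcessorSleep, then poll ChildrenAsleep
 * until the redistributor reports the requested state.
 */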
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}
/*
 * Routines to disable, enable, EOI and route interrupts
 */
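/*
 * Read back a single bit from one of the per-interrupt GIC bitmap
 * registers (one bit per interrupt, 32 interrupts per 32-bit word),
 * using the redistributor SGI frame for SGIs/PPIs and the distributor
 * otherwise.
 */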
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
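/*
 * Set a single bit in one of the per-interrupt bitmap registers, then
 * wait on the matching RWP bit (distributor or redistributor) so the
 * write is guaranteed to have taken effect before the caller proceeds.
 */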
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
			 type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
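/*
 * Pack the four MPIDR affinity levels into the 64-bit layout used by
 * GICD_IROUTER: Aff3 in bits [39:32], Aff2/Aff1/Aff0 in the low 24
 * bits.
 */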
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}
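/*
 * Low-level entry point: acknowledge INTIDs from ICC_IAR1_EL1 until a
 * spurious ID is returned. IDs 16-1019 (PPIs/SPIs) and 8192 and up
 * (LPIs) go to the IRQ domain; IDs below 16 are SGIs and are handed
 * to the IPI code on SMP.
 */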
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			/*
			 * Unlike GICv2, we don't need an smp_rmb() here.
			 * The control dependency from gic_read_iar to
			 * the ISB in gic_write_eoir is enough to ensure
			 * that any shared data read by handle_IPI will
			 * be read after the ACK.
			 */
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}
static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < gic_data.irq_nr; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}
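/*
 * Walk every redistributor region until we find the redistributor
 * whose GICR_TYPER affinity matches this CPU's MPIDR, and record its
 * mapped and physical base addresses in the per-CPU rdist data.
 */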
static int gic_populate_rdist(void)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
					smp_processor_id(), mpidr, i,
					&gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	return -ENODEV;
}
static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road... */
	gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	writel_relaxed(~0, rbase + GICR_IGROUPR0);

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
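/*
 * Starting at *base_cpu, collect the CPUs from @mask that live in the
 * cluster identified by @cluster_id into a 16-bit SGI target list
 * (one bit per Aff0 value), and advance *base_cpu to the last CPU
 * consumed so the caller can iterate cluster by cluster.
 */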
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	smp_wmb();

	for_each_cpu(cpu, mask) {
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, irq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void gic_smp_init(void)
{
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
}
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	void __iomem *reg;
	int enabled;
	u64 val;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);
	else
		gic_dist_wait_for_rwp();

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_smp_init()		do { } while(0)
#endif
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED,
};

#define GIC_ID_NR		(1U << gic_data.rdists.id_bits)
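/*
 * Bind a Linux IRQ to hardware interrupt @hw: PPIs become per-CPU
 * IRQs, SPIs and LPIs get the fasteoi flow, and anything outside the
 * implemented ranges (SGIs, the 1020-8191 hole, IDs beyond GIC_ID_NR)
 * is refused.
 */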
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate))
		chip = &gic_eoimode1_chip;

	/* SGIs are private to the core kernel */
	if (hw < 16)
		return -EPERM;
	/* Nothing here */
	if (hw >= gic_data.irq_nr && hw < 8192)
		return -EPERM;
	/* Off limits */
	if (hw >= GIC_ID_NR)
		return -EPERM;

	/* PPIs */
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	}
	/* SPIs */
	if (hw >= 32 && hw < gic_data.irq_nr) {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	/* LPIs */
	if (hw >= 8192 && hw < GIC_ID_NR) {
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
	}

	return 0;
}
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	if (fwspec->param_count >= 4 &&
	    fwspec->param[0] == 1 && fwspec->param[3] != 0)
		return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);

	return d == gic_data.domain;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};

static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};
static void gicv3_enable_quirks(void)
{
#ifdef CONFIG_ARM64
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		static_branch_enable(&is_cavium_thunderx);
#endif
}
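/*
 * Common bring-up for both the DT and ACPI probe paths: record the
 * firmware-provided bases, size the distributor, create the IRQ
 * domain, then initialise the distributor, the boot CPU interface and
 * CPU PM support.
 */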
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	struct device_node *node;
	u32 typer;
	int gic_irqs;
	int err;

	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	if (static_key_true(&supports_deactivate))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	gicv3_enable_quirks();

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	node = to_of_node(handle);
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
	    node) /* Temp hack to prevent ITS init for ACPI */
		its_init(node, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}
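/*
 * Translate a DT cpu node into a logical CPU number by matching its
 * "reg" property (the MPIDR affinity value) against the logical map;
 * returns -1 if the node does not describe a possible CPU.
 */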
static int get_cpu_number(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;
	int i;

	cell = of_get_property(dn, "reg", NULL);
	if (!cell)
		return -1;

	hwid = of_read_number(cell, of_n_addr_cells(dn));

	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK)
		return -1;

	for (i = 0; i < num_possible_cpus(); i++)
		if (cpu_logical_map(i) == hwid)
			return i;

	return -1;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		return;

	parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
	if (WARN_ON(!parts))
		return;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %s[%d] { ",
			child_part->name, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = get_cpu_number(cpu_node);
			if (WARN_ON(cpu == -1))
				continue;

			pr_cont("%s[%d] ", cpu_node->full_name, cpu);

			cpumask_set_cpu(cpu, &part->mask);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < 16; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= 1,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}
}
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);
	gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}
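/*
 * The MADT can describe redistributors either as dedicated GICR
 * subtables (one per contiguous region) or, on systems without GICR
 * subtables, through the per-CPU GICC entries. The two parsers below
 * map one region each way and hand it to gic_acpi_register_redist().
 */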
static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}
static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has valid gicr base address, then it means
	 * GICR base is presented via GICC
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor description, GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0)
		acpi_data.single_redist = true;

	return count;
}
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}
static int __init gic_acpi_parse_virt_madt_gicc(struct acpi_subtable_header *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}
#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_set_kvm_info(&gic_v3_kvm_info);
}
static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
	gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif