Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller
2018-11-19 10:55:00 -08:00
204 changed files with 1837 additions and 1129 deletions


@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
 D: Soundblaster driver fixes, ISAPnP quirk
 S: California, USA

+N: Jarkko Lavinen
+E: jarkko.lavinen@nokia.com
+D: OMAP MMC support
+
 N: Jonathan Layes
 D: ARPD support


@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
 a governor ``sysfs`` interface to it.  Next, the governor is started by
 invoking its ``->start()`` callback.

-That callback it expected to register per-CPU utilization update callbacks for
+That callback is expected to register per-CPU utilization update callbacks for
 all of the online CPUs belonging to the given policy with the CPU scheduler.
 The utilization update callbacks will be invoked by the CPU scheduler on
 important events, like task enqueue and dequeue, on every iteration of the
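
As background for this hunk: the per-CPU registration that ``->start()`` is
expected to perform is typically done through cpufreq_add_update_util_hook().
A minimal, hedged sketch of such a ->start() callback follows; the governor
data structure and callback names are illustrative, not part of this patch:

    /* Illustrative governor ->start(): register a utilization update
     * callback for every CPU of the policy.  "my_gov_*" names are
     * placeholders, not kernel symbols. */
    #include <linux/cpufreq.h>
    #include <linux/percpu-defs.h>
    #include <linux/sched/cpufreq.h>

    struct my_gov_cpu {
            struct update_util_data update_util;
    };

    static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu);

    static void my_gov_update(struct update_util_data *data, u64 time,
                              unsigned int flags)
    {
            /* invoked by the scheduler on task enqueue/dequeue etc. */
    }

    static int my_gov_start(struct cpufreq_policy *policy)
    {
            unsigned int cpu;

            for_each_cpu(cpu, policy->cpus)
                    cpufreq_add_update_util_hook(cpu,
                                    &per_cpu(my_gov_cpu, cpu).update_util,
                                    my_gov_update);
            return 0;
    }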


@@ -86,9 +86,11 @@ transitions.
 This will give a fine grained information about all the CPU frequency
 transitions. The cat output here is a two dimensional matrix, where an entry
 <i,j> (row i, column j) represents the count of number of transitions from
-Freq_i to Freq_j. Freq_i is in descending order with increasing rows and
-Freq_j is in descending order with increasing columns. The output here also
-contains the actual freq values for each row and column for better readability.
+Freq_i to Freq_j. Freq_i rows and Freq_j columns follow the sorting order in
+which the driver has provided the frequency table initially to the cpufreq core
+and so can be sorted (ascending or descending) or unsorted.  The output here
+also contains the actual freq values for each row and column for better
+readability.

 If the transition table is bigger than PAGE_SIZE, reading this will
 return an -EFBIG error.
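
For illustration, a small user-space reader for the table described above,
including the -EFBIG case (standard sysfs path; error handling kept minimal):

    /* Dump cpu0's frequency transition table; a table larger than
     * PAGE_SIZE makes the read fail with EFBIG, as documented. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/devices/system/cpu/cpu0/cpufreq/stats/trans_table",
                          O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            n = read(fd, buf, sizeof(buf) - 1);
            if (n < 0) {
                    if (errno == EFBIG)
                            fprintf(stderr, "trans_table larger than PAGE_SIZE\n");
                    else
                            perror("read");
                    close(fd);
                    return 1;
            }
            buf[n] = '\0';
            fputs(buf, stdout);
            close(fd);
            return 0;
    }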


@@ -1,65 +0,0 @@
-Generic ARM big LITTLE cpufreq driver's DT glue
------------------------------------------------
-
-This is DT specific glue layer for generic cpufreq driver for big LITTLE
-systems.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
-
-FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
-must be present contiguously. Generic DT driver will check only node 'x' for
-cpu:x.
-
-Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
-  for details
-
-Optional properties:
-- clock-latency: Specify the possible maximum transition latency for clock,
-  in unit of nanoseconds.
-
-Examples:
-
-cpus {
-        #address-cells = <1>;
-        #size-cells = <0>;
-
-        cpu@0 {
-                compatible = "arm,cortex-a15";
-                reg = <0>;
-                next-level-cache = <&L2>;
-                operating-points = <
-                        /* kHz    uV */
-                        792000  1100000
-                        396000  950000
-                        198000  850000
-                >;
-                clock-latency = <61036>; /* two CLK32 periods */
-        };
-
-        cpu@1 {
-                compatible = "arm,cortex-a15";
-                reg = <1>;
-                next-level-cache = <&L2>;
-        };
-
-        cpu@100 {
-                compatible = "arm,cortex-a7";
-                reg = <100>;
-                next-level-cache = <&L2>;
-                operating-points = <
-                        /* kHz   uV */
-                        792000  950000
-                        396000  750000
-                        198000  450000
-                >;
-                clock-latency = <61036>; /* two CLK32 periods */
-        };
-
-        cpu@101 {
-                compatible = "arm,cortex-a7";
-                reg = <101>;
-                next-level-cache = <&L2>;
-        };
-};


@@ -17,7 +17,7 @@ Example:
                 reg = <1>;
                 clocks = <&clk32m>;
                 interrupt-parent = <&gpio4>;
-                interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+                interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
                 vdd-supply = <&reg5v0>;
                 xceiver-supply = <&reg5v0>;
         };


@@ -5,6 +5,7 @@ Required properties:
 - compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
              "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
              "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+             "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
              "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
              "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
              "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
              "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
              "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
              "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
+             "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
              "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
              "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
              compatible device.
-             "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
+             "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
+             compatible device.
              When compatible with the generic version, nodes must list the
              SoC-specific version corresponding to the platform first
              followed by the generic version.

 - reg: physical base address and size of the R-Car CAN register map.
 - interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
+          devices.
+          phandles and clock specifiers for 3 CAN clock inputs for every other
+          SoC.
+- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
+               3 clock input name strings for every other SoC: "clkp1", "clkp2",
+               "can_clk".
 - pinctrl-0: pin control group to be used for this controller.
 - pinctrl-names: must be "default".

-Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
-compatible:
-In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
-and can be used by both CAN and CAN FD controller at the same time. It needs to
-be scaled to maximum frequency if any of these controllers use it. This is done
+Required properties for R8A7795, R8A7796 and R8A77965:
+For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
+be used by both CAN and CAN FD controller at the same time. It needs to be
+scaled to maximum frequency if any of these controllers use it. This is done
 using the below properties:

 - assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:

 Optional properties:
 - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
                             <0x0> (default) : Peripheral clock (clkp1)
-                            <0x1> : Peripheral clock (clkp2)
-                            <0x3> : Externally input clock
+                            <0x1> : Peripheral clock (clkp2) (not supported by
+                            RZ/G2 devices)
+                            <0x3> : External input clock

 Example
 -------


@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:

      u32 rxrpc_kernel_check_life(struct socket *sock,
                                  struct rxrpc_call *call);
+     void rxrpc_kernel_probe_life(struct socket *sock,
+                                  struct rxrpc_call *call);

-     This returns a number that is updated when ACKs are received from the peer
-     (notably including PING RESPONSE ACKs which we can elicit by sending PING
-     ACKs to see if the call still exists on the server).  The caller should
-     compare the numbers of two calls to see if the call is still alive after
-     waiting for a suitable interval.
+     The first function returns a number that is updated when ACKs are received
+     from the peer (notably including PING RESPONSE ACKs which we can elicit by
+     sending PING ACKs to see if the call still exists on the server).  The
+     caller should compare the numbers of two calls to see if the call is still
+     alive after waiting for a suitable interval.

      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server whilst waiting for the server to
      process a client operation.

-     This function may transmit a PING ACK.
+     The second function causes a ping ACK to be transmitted to try to provoke
+     the peer into responding, which would then cause the value returned by the
+     first function to change.  Note that this must be called in TASK_RUNNING
+     state.

  (*) Get reply timestamp.
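
A hedged sketch of the polling pattern described above, combining the two
calls (the helper name and the one-second wait are illustrative; as noted,
the caller must be in TASK_RUNNING state):

    #include <linux/sched.h>
    #include <net/af_rxrpc.h>

    static bool my_call_still_alive(struct socket *sock,
                                    struct rxrpc_call *call)
    {
            u32 life = rxrpc_kernel_check_life(sock, call);

            rxrpc_kernel_probe_life(sock, call);    /* elicit a ping response */
            schedule_timeout_interruptible(HZ);     /* wait a suitable interval */

            return rxrpc_kernel_check_life(sock, call) != life;
    }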


@@ -717,7 +717,7 @@ F: include/linux/mfd/altera-a10sr.h
 F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h

 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M: Vince Bridgers <vbridger@opensource.altera.com>
+M: Thor Thayer <thor.thayer@linux.intel.com>
 L: netdev@vger.kernel.org
 L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S: Maintained
@@ -3276,6 +3276,12 @@ F: include/uapi/linux/caif/
 F: include/net/caif/
 F: net/caif/

+CAKE QDISC
+M: Toke Høiland-Jørgensen <toke@toke.dk>
+L: cake@lists.bufferbloat.net (moderated for non-subscribers)
+S: Maintained
+F: net/sched/sch_cake.c
+
 CALGARY x86-64 IOMMU
 M: Muli Ben-Yehuda <mulix@mulix.org>
 M: Jon Mason <jdmason@kudzu.us>
@@ -10809,9 +10815,9 @@ F: drivers/media/platform/omap3isp/
 F: drivers/staging/media/omap4iss/

 OMAP MMC SUPPORT
-M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
+M: Aaro Koskinen <aaro.koskinen@iki.fi>
 L: linux-omap@vger.kernel.org
-S: Maintained
+S: Odd Fixes
 F: drivers/mmc/host/omap.c

 OMAP POWER MANAGEMENT SUPPORT
@@ -11746,6 +11752,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
 PIN CONTROLLER - INTEL
 M: Mika Westerberg <mika.westerberg@linux.intel.com>
 M: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 S: Maintained
 F: drivers/pinctrl/intel/


@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = "People's Front"

 # *DOCUMENTATION*


@@ -111,6 +111,7 @@
 #include <linux/kernel.h>

 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);

 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg) \


@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
         /* MISC
          * get data abort address/flags
          */
@@ -79,9 +79,13 @@ extern struct processor {
         unsigned int suspend_size;
         void (*do_suspend)(void *);
         void (*do_resume)(void *);
-} processor;
+};

 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);

 #else
-#define cpu_proc_init                   processor._proc_init
-#define cpu_proc_fin                    processor._proc_fin
-#define cpu_reset                       processor.reset
-#define cpu_do_idle                     processor._do_idle
-#define cpu_dcache_clean_area           processor.dcache_clean_area
-#define cpu_set_pte_ext                 processor.set_pte_ext
-#define cpu_do_switch_mm                processor.switch_mm

-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend                  processor.do_suspend
-#define cpu_do_resume                   processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)                  cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)                   cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+        unsigned int cpu = smp_processor_id();
+        *cpu_vtable[cpu] = *p;
+        WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+                     cpu_vtable[0]->dcache_clean_area);
+        WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+                     cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)                  processor.f
+#define PROC_TABLE(f)                   processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+        processor = *p;
+}
+#endif
+
+#define cpu_proc_init                   PROC_VTABLE(_proc_init)
+#define cpu_check_bugs                  PROC_VTABLE(check_bugs)
+#define cpu_proc_fin                    PROC_VTABLE(_proc_fin)
+#define cpu_reset                       PROC_VTABLE(reset)
+#define cpu_do_idle                     PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area           PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext                 PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm                PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend                  PROC_VTABLE(do_suspend)
+#define cpu_do_resume                   PROC_VTABLE(do_resume)
 #endif

 extern void cpu_resume(void);
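
To make the indirection concrete, here is a stand-alone sketch of the per-CPU
vtable pattern these macros implement (ordinary user-space C with illustrative
names, not kernel code): each CPU owns a copy of the ops table, so one CPU's
Spectre workaround can repoint its own switch_mm without touching the others.

    #include <stdio.h>

    struct processor_ops {
            void (*switch_mm)(void);
    };

    static void generic_switch_mm(void)  { puts("generic switch_mm"); }
    static void hardened_switch_mm(void) { puts("hardened switch_mm"); }

    #define NR_CPUS 2
    static struct processor_ops vtable[NR_CPUS] = {
            [0] = { .switch_mm = generic_switch_mm },
            [1] = { .switch_mm = generic_switch_mm },
    };

    #define PROC_VTABLE(cpu, f) (vtable[cpu].f)

    int main(void)
    {
            /* CPU 1 discovers it needs the workaround */
            vtable[1].switch_mm = hardened_switch_mm;

            PROC_VTABLE(0, switch_mm)();    /* generic */
            PROC_VTABLE(1, switch_mm)();    /* hardened */
            return 0;
    }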


@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-        if (processor.check_bugs)
-                processor.check_bugs();
+        if (cpu_check_bugs)
+                cpu_check_bugs();
 #endif
 }


@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
         .size   __mmap_switched_data, . - __mmap_switched_data

+        __FINIT
+        .text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
         ldmfd   sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)

-        __FINIT
-        .text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses


@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);

 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+        [0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif

+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
+{
+        struct proc_info_list *list = lookup_processor_type(midr);
+
+        if (!list) {
+                pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+                       smp_processor_id(), midr);
+                while (1)
+                        /* can't use cpu_relax() here as it may require MMU setup */;
+        }
+
+        return list;
+}
+
 static void __init setup_processor(void)
 {
-        struct proc_info_list *list;
+        unsigned int midr = read_cpuid_id();
+        struct proc_info_list *list = lookup_processor(midr);

-        /*
-         * locate processor in the list of supported processor
-         * types.  The linker builds this table for us from the
-         * entries in arch/arm/mm/proc-*.S
-         */
-        list = lookup_processor_type(read_cpuid_id());
-        if (!list) {
-                pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-                       read_cpuid_id());
-                while (1);
-        }
-
         cpu_name = list->cpu_name;
         __cpu_architecture = __get_cpu_architecture();

-#ifdef MULTI_CPU
-        processor = *list->proc;
-#endif
+        init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
         cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif

         pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-                cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+                list->cpu_name, midr, midr & 15,
                 proc_arch[cpu_architecture()], get_cr());

         snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",


@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }

+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+        if (!cpu_vtable[cpu])
+                cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+        return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+        init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+        return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
         int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
         if (!smp_ops.smp_boot_secondary)
                 return -ENOSYS;

+        ret = secondary_biglittle_prepare(cpu);
+        if (ret)
+                return ret;
+
         /*
          * We need to tell the secondary core where to find
          * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
         struct mm_struct *mm = &init_mm;
         unsigned int cpu;

+        secondary_biglittle_init();
+
         /*
          * The identity mapping is uncached (strongly ordered), so
          * switch away from it before attempting any exclusive accesses.


@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
         return 0;
 }

-#else
-static inline int omapdss_init_fbdev(void)
+static const char * const omapdss_compat_names[] __initconst = {
+        "ti,omap2-dss",
+        "ti,omap3-dss",
+        "ti,omap4-dss",
+        "ti,omap5-dss",
+        "ti,dra7-dss",
+};
+
+static struct device_node * __init omapdss_find_dss_of_node(void)
 {
-        return 0;
+        struct device_node *node;
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
+                node = of_find_compatible_node(NULL, NULL,
+                                               omapdss_compat_names[i]);
+                if (node)
+                        return node;
+        }
+
+        return NULL;
 }
+
+static int __init omapdss_init_of(void)
+{
+        int r;
+        struct device_node *node;
+        struct platform_device *pdev;
+
+        /* only create dss helper devices if dss is enabled in the .dts */
+
+        node = omapdss_find_dss_of_node();
+        if (!node)
+                return 0;
+
+        if (!of_device_is_available(node))
+                return 0;
+
+        pdev = of_find_device_by_node(node);
+        if (!pdev) {
+                pr_err("Unable to find DSS platform device\n");
+                return -ENODEV;
+        }
+
+        r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+        if (r) {
+                pr_err("Unable to populate DSS submodule devices\n");
+                return r;
+        }
+
+        return omapdss_init_fbdev();
+}
+omap_device_initcall(omapdss_init_of);
+
 #endif /* CONFIG_FB_OMAP2 */

 static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)

         return r;
 }
-
-static const char * const omapdss_compat_names[] __initconst = {
-        "ti,omap2-dss",
-        "ti,omap3-dss",
-        "ti,omap4-dss",
-        "ti,omap5-dss",
-        "ti,dra7-dss",
-};
-
-static struct device_node * __init omapdss_find_dss_of_node(void)
-{
-        struct device_node *node;
-        int i;
-
-        for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
-                node = of_find_compatible_node(NULL, NULL,
-                                               omapdss_compat_names[i]);
-                if (node)
-                        return node;
-        }
-
-        return NULL;
-}
-
-static int __init omapdss_init_of(void)
-{
-        int r;
-        struct device_node *node;
-        struct platform_device *pdev;
-
-        /* only create dss helper devices if dss is enabled in the .dts */
-
-        node = omapdss_find_dss_of_node();
-        if (!node)
-                return 0;
-
-        if (!of_device_is_available(node))
-                return 0;
-
-        pdev = of_find_device_by_node(node);
-        if (!pdev) {
-                pr_err("Unable to find DSS platform device\n");
-                return -ENODEV;
-        }
-
-        r = of_platform_populate(node, NULL, NULL, &pdev->dev);
-        if (r) {
-                pr_err("Unable to populate DSS submodule devices\n");
-                return r;
-        }
-
-        return omapdss_init_fbdev();
-}
-
-omap_device_initcall(omapdss_init_of);


@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
         case ARM_CPU_PART_CORTEX_A17:
         case ARM_CPU_PART_CORTEX_A73:
         case ARM_CPU_PART_CORTEX_A75:
-                if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-                        goto bl_error;
                 per_cpu(harden_branch_predictor_fn, cpu) =
                         harden_branch_predictor_bpiall;
                 spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)

         case ARM_CPU_PART_CORTEX_A15:
         case ARM_CPU_PART_BRAHMA_B15:
-                if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-                        goto bl_error;
                 per_cpu(harden_branch_predictor_fn, cpu) =
                         harden_branch_predictor_iciallu;
                 spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
                                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                         if ((int)res.a0 != 0)
                                 break;
-                        if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-                                goto bl_error;
                         per_cpu(harden_branch_predictor_fn, cpu) =
                                 call_hvc_arch_workaround_1;
-                        processor.switch_mm = cpu_v7_hvc_switch_mm;
+                        cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
                         spectre_v2_method = "hypervisor";
                         break;

@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
                                           ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                         if ((int)res.a0 != 0)
                                 break;
-                        if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-                                goto bl_error;
                         per_cpu(harden_branch_predictor_fn, cpu) =
                                 call_smc_arch_workaround_1;
-                        processor.switch_mm = cpu_v7_smc_switch_mm;
+                        cpu_do_switch_mm = cpu_v7_smc_switch_mm;
                         spectre_v2_method = "firmware";
                         break;

@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
         if (spectre_v2_method)
                 pr_info("CPU%u: Spectre v2: using %s workaround\n",
                         smp_processor_id(), spectre_v2_method);
-        return;
-
-bl_error:
-        pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-               cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)


@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
          */
         ufp_exc->fpexc = hwstate->fpexc;
         ufp_exc->fpinst = hwstate->fpinst;
-        ufp_exc->fpinst2 = ufp_exc->fpinst2;
+        ufp_exc->fpinst2 = hwstate->fpinst2;

         /* Ensure that VFP is disabled. */
         vfp_flush_hwstate(thread);


@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
         arm64_memblock_init();

         paging_init();
+        efi_apply_persistent_mem_reservations();

         acpi_table_upgrade();


@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
         volatile unsigned int *a;

         a = __ldcw_align(x);
-        /* Release with ordered store. */
-        __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+        mb();
+        *a = 1;
 }

 static inline int arch_spin_trylock(arch_spinlock_t *x)
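
For context, a portable C11 sketch of the unlock sequence this change
restores, a full memory barrier followed by a plain store of the "free"
value (names illustrative):

    #include <stdatomic.h>

    static volatile unsigned int lock_word;

    static void my_spin_unlock(volatile unsigned int *a)
    {
            atomic_thread_fence(memory_order_seq_cst);      /* mb() */
            *a = 1;                                         /* mark lock free */
    }

    int main(void)
    {
            my_spin_unlock(&lock_word);
            return lock_word == 1 ? 0 : 1;
    }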


@@ -640,7 +640,8 @@ cas_action:
         sub,<>  %r28, %r25, %r0
 2:      stw     %r24, 0(%r26)
         /* Free lock */
-        stw,ma  %r20, 0(%sr2,%r20)
+        sync
+        stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
         /* Clear thread register indicator */
         stw     %r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
 3:
         /* Error occurred on load or store */
         /* Free lock */
-        stw,ma  %r20, 0(%sr2,%r20)
+        sync
+        stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
         stw     %r0, 4(%sr2,%r20)
 #endif
@@ -855,7 +857,8 @@ cas2_action:

 cas2_end:
         /* Free lock */
-        stw,ma  %r20, 0(%sr2,%r20)
+        sync
+        stw     %r20, 0(%sr2,%r20)
         /* Enable interrupts */
         ssm     PSW_SM_I, %r0
         /* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
 22:
         /* Error occurred on load or store */
         /* Free lock */
-        stw,ma  %r20, 0(%sr2,%r20)
+        sync
+        stw     %r20, 0(%sr2,%r20)
         ssm     PSW_SM_I, %r0
         ldo     1(%r0),%r28
         b       lws_exit


@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest address in the kernel virtual space are:
  *
- * The kernel virtual space is thus:
+ *  d0003fffffffffff            # with Hash MMU
+ *  c00fffffffffffff            # with Radix MMU
  *
- *  0xD000000000000000          : vmalloc
- *  0xD000080000000000          : PCI PHB IO space
- *  0xD000080080000000          : ioremap
- *  0xD0000fffffffffff          : end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */

 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK   0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT  48
+#define PCI_IO_IND_TOKEN_SHIFT  52
+#define PCI_IO_IND_TOKEN_MASK   (0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr) \
         ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr) \
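
A worked user-space sketch of the token arithmetic in the new macros: an
8-bit token lives in bits 52-59 of the address and is masked off before the
access (the sample address and token value are arbitrary):

    #include <stdio.h>

    #define PCI_IO_IND_TOKEN_SHIFT  52
    #define PCI_IO_IND_TOKEN_MASK   (0xfful << PCI_IO_IND_TOKEN_SHIFT)

    int main(void)
    {
            unsigned long addr = 0xc000000012345678ul;
            unsigned long tagged = (addr & ~PCI_IO_IND_TOKEN_MASK) |
                                   (0x5aul << PCI_IO_IND_TOKEN_SHIFT);

            printf("tagged: %lx\n", tagged);        /* c5a0000012345678 */
            printf("token:  %lx\n",                 /* 5a */
                   (tagged & PCI_IO_IND_TOKEN_MASK) >> PCI_IO_IND_TOKEN_SHIFT);
            printf("fixed:  %lx\n",                 /* c000000012345678 */
                   tagged & ~PCI_IO_IND_TOKEN_MASK);
            return 0;
    }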


@@ -493,6 +493,8 @@
                                        __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)    stringify_in_c(.long PPC_INST_SLBFEE | \
                                         __PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b)  stringify_in_c(.long PPC_INST_SLBFEE | \
+                                               ___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)         stringify_in_c(.long PPC_INST_ICBT | \
                                        __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))

 /* PASemi instructions */


@@ -54,6 +54,7 @@ struct pt_regs

 #ifdef CONFIG_PPC64
         unsigned long ppr;
+        unsigned long __pad;    /* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
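
A stand-alone sketch of the invariant the pad preserves (the field layout is
illustrative, not the real pt_regs): ppr alone leaves the structure at an odd
number of 8-byte words, so its size stops being a multiple of 16 until the
pad is added.

    struct regs_demo {
            unsigned long gprs[44]; /* even number of 8-byte words so far */
            unsigned long ppr;      /* new field: count becomes odd */
            unsigned long __pad;    /* restores the 16-byte multiple */
    };

    _Static_assert(sizeof(struct regs_demo) % 16 == 0,
                   "interrupt stack frame must stay 16-byte aligned");

    int main(void) { return 0; }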


@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
         unsigned long pa;

+        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
         pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
                                         early_cpu_to_node(cpu), MEMBLOCK_NONE);
         if (!pa) {


@@ -6,8 +6,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace

 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */

 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>


@@ -6,8 +6,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke

 #define kvm_trace_symbol_exit \
         {0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>


@@ -9,8 +9,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv

 #define kvm_trace_symbol_hcall \
         {H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>


@@ -8,8 +8,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr

 TRACE_EVENT(kvm_book3s_reenter,
         TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>


@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,

         switch (rc) {
         case H_FUNCTION:
-                printk(KERN_INFO
+                printk_once(KERN_INFO
                         "VPHN is not supported. Disabling polling...\n");
                 stop_topology_update();
                 break;


@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
         return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }

-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
         unsigned long tmp;

         WARN_ON_ONCE(mfmsr() & MSR_EE);

-        asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-        WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-        unsigned long tmp;
+        if (!cpu_has_feature(CPU_FTR_ARCH_206))
+                return;

-        WARN_ON_ONCE(mfmsr() & MSR_EE);
+        asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

-        asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-        WARN_ON(tmp != 0);
+        WARN_ON(present == (tmp == 0));
 #endif
 }

@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
          */
         slb_shadow_update(ea, ssize, flags, index);

-        assert_slb_notexists(ea);
+        assert_slb_presence(false, ea);
         asm volatile("slbmte  %0,%1" :
                      : "r" (mk_vsid_data(ea, ssize, flags)),
                        "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
                        "r" (be64_to_cpu(p->save_area[index].esid)));
         }

-        assert_slb_exists(local_paca->kstack);
+        assert_slb_presence(true, local_paca->kstack);
 }

 /*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
                      :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
                         "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
                      : "memory");
-        assert_slb_exists(get_paca()->kstack);
+        assert_slb_presence(true, get_paca()->kstack);

         get_paca()->slb_cache_ptr = 0;
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                         ea = (unsigned long)
                                 get_paca()->slb_cache[i] << SID_SHIFT;
                         /*
-                         * Could assert_slb_exists here, but hypervisor
-                         * or machine check could have come in and
-                         * removed the entry at this point.
+                         * Could assert_slb_presence(true) here, but
+                         * hypervisor or machine check could have come
+                         * in and removed the entry at this point.
                          */

                         slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
          * User preloads should add isync afterwards in case the kernel
          * accesses user memory before it returns to userspace with rfid.
          */
-        assert_slb_notexists(ea);
+        assert_slb_presence(false, ea);
         asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

         barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
                         return -EFAULT;

                 if (ea < H_VMALLOC_END)
-                        flags = get_paca()->vmalloc_sllp;
+                        flags = local_paca->vmalloc_sllp;
                 else
                         flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
         } else {


@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);

-#define NPU_DMA_OP_UNSUPPORTED() \
-        dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-                __func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t flag,
-                           unsigned long attrs)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-        return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-                         void *vaddr, dma_addr_t dma_handle,
-                         unsigned long attrs)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-                                   unsigned long offset, size_t size,
-                                   enum dma_data_direction direction,
-                                   unsigned long attrs)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-        return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-                          int nelems, enum dma_data_direction direction,
-                          unsigned long attrs)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-        return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-        return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-        NPU_DMA_OP_UNSUPPORTED();
-        return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-        .map_page               = dma_npu_map_page,
-        .map_sg                 = dma_npu_map_sg,
-        .alloc                  = dma_npu_alloc,
-        .free                   = dma_npu_free,
-        .dma_supported          = dma_npu_dma_supported,
-        .get_required_mask      = dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE assoicated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
         rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

         /*
-         * We don't initialise npu_pe->tce32_table as we always use
-         * dma_npu_ops which are nops.
+         * NVLink devices use the same TCE table configuration as
+         * their parent device so drivers shouldn't be doing DMA
+         * operations directly on these devices.
          */
-        set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+        set_dma_ops(&npe->pdev->dev, NULL);
 }

 /*


@@ -77,4 +77,8 @@ core-y += arch/riscv/kernel/ arch/riscv/mm/

 libs-y += arch/riscv/lib/

+PHONY += vdso_install
+vdso_install:
+	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
 all: vmlinux


@@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_PRINTK_TIME=y
 # CONFIG_RCU_TRACE is not set


@@ -56,8 +56,8 @@ struct pt_regs {
         unsigned long sstatus;
         unsigned long sbadaddr;
         unsigned long scause;
-        /* a0 value before the syscall */
-        unsigned long orig_a0;
+	/* a0 value before the syscall */
+	unsigned long orig_a0;
 };

 #ifdef CONFIG_64BIT


@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
         if (v != (u32)v) {
                 pr_err("%s: value %016llx out of range for 32-bit field\n",
-                       me->name, v);
+                       me->name, (long long)v);
                 return -EINVAL;
         }
         *location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
         if (offset != (s32)offset) {
                 pr_err(
                   "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                  me->name, v, location);
+                  me->name, (long long)v, location);
                 return -EINVAL;
         }

@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
         if (IS_ENABLED(CMODEL_MEDLOW)) {
                 pr_err(
                   "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                  me->name, v, location);
+                  me->name, (long long)v, location);
                 return -EINVAL;
         }

@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
         } else {
                 pr_err(
                   "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
-                  me->name, v, location);
+                  me->name, (long long)v, location);
                 return -EINVAL;
         }

@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
         } else {
                 pr_err(
                   "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                  me->name, v, location);
+                  me->name, (long long)v, location);
                 return -EINVAL;
         }
 }
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
         if (offset != fill_v) {
                 pr_err(
                   "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                  me->name, v, location);
+                  me->name, (long long)v, location);
                 return -EINVAL;
         }
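
The casts above matter because Elf_Addr is 32-bit on rv32 and 64-bit on rv64,
so a single %llx format only matches after explicit widening; a short sketch
(the typedef stands in for whichever width the target uses):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t Elf_Addr;      /* rv64 case; uint32_t on rv32 */

    int main(void)
    {
            Elf_Addr v = 0x123456789abcdef0ull;

            /* portable regardless of Elf_Addr's width */
            printf("%016llx\n", (unsigned long long)v);
            return 0;
    }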


@@ -3,6 +3,6 @@ lib-y += memcpy.o
 lib-y += memset.o
 lib-y += uaccess.o

-lib-(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_64BIT) += tishift.o

 lib-$(CONFIG_32BIT) += udivdi3.o


@@ -129,8 +129,15 @@ struct intel_uncore_box {
         struct intel_uncore_extra_reg shared_regs[0];
 };

-#define UNCORE_BOX_FLAG_INITIATED       0
-#define UNCORE_BOX_FLAG_CTL_OFFS8       1 /* event config registers are 8-byte apart */
+/* CFL uncore 8th cbox MSRs */
+#define CFL_UNC_CBO_7_PERFEVTSEL0               0xf70
+#define CFL_UNC_CBO_7_PER_CTR0                  0xf76
+
+#define UNCORE_BOX_FLAG_INITIATED               0
+/* event config registers are 8-byte apart */
+#define UNCORE_BOX_FLAG_CTL_OFFS8               1
+/* CFL 8th CBOX has different MSR space */
+#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS      2

 struct uncore_event_desc {
         struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
 static inline
 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 {
-        return box->pmu->type->event_ctl +
-                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-                uncore_msr_box_offset(box);
+        if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+                return CFL_UNC_CBO_7_PERFEVTSEL0 +
+                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+        } else {
+                return box->pmu->type->event_ctl +
+                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                       uncore_msr_box_offset(box);
+        }
 }

 static inline
 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 {
-        return box->pmu->type->perf_ctr +
-                (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-                uncore_msr_box_offset(box);
+        if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+                return CFL_UNC_CBO_7_PER_CTR0 +
+                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+        } else {
+                return box->pmu->type->perf_ctr +
+                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                       uncore_msr_box_offset(box);
+        }
 }

 static inline


@@ -15,6 +15,25 @@
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC  0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC  0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC  0x191f
+#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC   0x590c
+#define PCI_DEVICE_ID_INTEL_KBL_U_IMC   0x5904
+#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC  0x5914
+#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC  0x590f
+#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC  0x591f
+#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC  0x3ecc
+#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC  0x3ed0
+#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC  0x3e10
+#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC  0x3ec4
+#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC        0x3e0f
+#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC        0x3e1f
+#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC        0x3ec2
+#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC        0x3e30
+#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC        0x3e18
+#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC        0x3ec6
+#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC        0x3e31
+#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC        0x3e33
+#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC        0x3eca
+#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC        0x3e32

 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK         0x000000ff
@@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
                 wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                         SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
         }
+
+        /* The 8th CBOX has different MSR space */
+        if (box->pmu->pmu_idx == 7)
+                __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
 }

 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
@@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
 static struct intel_uncore_type skl_uncore_cbox = {
         .name           = "cbox",
         .num_counters   = 4,
-        .num_boxes      = 5,
+        .num_boxes      = 8,
         .perf_ctr_bits  = 44,
         .fixed_ctr_bits = 48,
         .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
@@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                 .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
         },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
+        { /* IMC */
+                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
+                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+        },
         { /* end: all zeroes */ },
 };
@@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
         IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
         IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
         IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
+        IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
+        IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
+        IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
+        IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
+        IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
+        IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
+        IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
+        IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
+        IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
+        IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
+        IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
+        IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
+        IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
+        IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
+        IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
+        IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
+        IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
+        IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
+        IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
         {  /* end marker */ }
 };


@@ -23,7 +23,11 @@
 # error Linux requires the Xtensa Windowed Registers Option.
 #endif

-#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN

 /*
  * User space process size: 1 GB.


@@ -88,9 +88,12 @@ _SetupMMU:
         initialize_mmu
 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
         rsr     a2, excsave1
-        movi    a3, 0x08000000
+        movi    a3, XCHAL_KSEG_PADDR
+        bltu    a2, a3, 1f
+        sub     a2, a2, a3
+        movi    a3, XCHAL_KSEG_SIZE
         bgeu    a2, a3, 1f
-        movi    a3, 0xd0000000
+        movi    a3, XCHAL_KSEG_CACHED_VADDR
         add     a2, a2, a3
         wsr     a2, excsave1
 1:


@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
         if (bio_flagged(bio_src, BIO_THROTTLED))
                 bio_set_flag(bio, BIO_THROTTLED);
         bio->bi_opf = bio_src->bi_opf;
+        bio->bi_ioprio = bio_src->bi_ioprio;
         bio->bi_write_hint = bio_src->bi_write_hint;
         bio->bi_iter = bio_src->bi_iter;
         bio->bi_io_vec = bio_src->bi_io_vec;


@@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
          * dispatch may still be in-progress since we dispatch requests
          * from more than one contexts.
          *
-         * No need to quiesce queue if it isn't initialized yet since
-         * blk_freeze_queue() should be enough for cases of passthrough
-         * request.
+         * We rely on driver to deal with the race in case that queue
+         * initialization isn't done.
          */
         if (q->mq_ops && blk_queue_init_done(q))
                 blk_mq_quiesce_queue(q);


@@ -55,9 +55,11 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 return -EINVAL;

         while (nr_sects) {
-                unsigned int req_sects = min_t(unsigned int, nr_sects,
-                                bio_allowed_max_sectors(q));
+                sector_t req_sects = min_t(sector_t, nr_sects,
+                                bio_allowed_max_sectors(q));
+
+                WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

                 bio = blk_next_bio(bio, 0, gfp_mask);
                 bio->bi_iter.bi_sector = sector;
                 bio_set_dev(bio, bdev);
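
A user-space sketch of the truncation this hunk avoids: taking min_t() in a
32-bit type drops the high bits of a 64-bit sector count before the
comparison (values chosen to show the failure):

    #include <stdint.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            uint64_t nr_sects = 0x100000001ull;     /* more than 32 bits */
            uint32_t max_sects = 0xffffffffu >> 9;  /* queue's cap */

            /* old: both sides truncated to 32 bits first, so 1 wins */
            printf("32-bit min: %u\n", min_t(uint32_t, nr_sects, max_sects));
            /* new: compared in the full 64-bit type, so the cap wins */
            printf("64-bit min: %llu\n",
                   (unsigned long long)min_t(uint64_t, nr_sects, max_sects));
            return 0;
    }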


@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
                 return NULL;
         bio->bi_disk            = bio_src->bi_disk;
         bio->bi_opf             = bio_src->bi_opf;
+        bio->bi_ioprio          = bio_src->bi_ioprio;
         bio->bi_write_hint      = bio_src->bi_write_hint;
         bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
         bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;


@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_cipher rcipher;
 
-	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+	strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
 	rcipher.blocksize = alg->cra_blocksize;
 	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_comp rcomp;
 
-	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+	strncpy(rcomp.type, "compression", sizeof(rcomp.type));
 	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
 		    sizeof(struct crypto_report_comp), &rcomp))
 		goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_acomp racomp;
 
-	strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+	strncpy(racomp.type, "acomp", sizeof(racomp.type));
 
 	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
 		    sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_akcipher rakcipher;
 
-	strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+	strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 
 	if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
 		    sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 {
 	struct crypto_report_kpp rkpp;
 
-	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+	strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
 	if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
 		    sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ nla_put_failure:
 static int crypto_report_one(struct crypto_alg *alg,
 			     struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
-	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+	strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+	strncpy(ualg->cru_driver_name, alg->cra_driver_name,
 		sizeof(ualg->cru_driver_name));
-	strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+	strncpy(ualg->cru_module_name, module_name(alg->cra_module),
 		sizeof(ualg->cru_module_name));
 
 	ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
 	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
 		struct crypto_report_larval rl;
 
-		strlcpy(rl.type, "larval", sizeof(rl.type));
+		strncpy(rl.type, "larval", sizeof(rl.type));
 		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
 			    sizeof(struct crypto_report_larval), &rl))
 			goto nla_put_failure;
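
Counter-intuitively, strlcpy() is the bug here: these report structures are copied verbatim to userspace over netlink, and strlcpy() leaves every byte after the terminating NUL untouched, so stale stack contents leak out, whereas strncpy() zero-fills the remainder of the buffer. A minimal userspace illustration (struct and sizes invented for the example):

    #include <stdio.h>
    #include <string.h>

    /* toy stand-in for the fixed-size ->type field of a report struct */
    struct report { char type[16]; };

    int main(void)
    {
    	struct report r;

    	/* simulate stale stack data left over from earlier calls */
    	memset(&r, 0xAA, sizeof(r));

    	/* strncpy() pads up to sizeof(r.type) with zeros, so nothing
    	 * stale survives in the bytes that reach userspace */
    	strncpy(r.type, "cipher", sizeof(r.type));

    	for (size_t i = 0; i < sizeof(r.type); i++)
    		printf("%02x ", (unsigned char)r.type[i]);
    	putchar('\n');
    	return 0;
    }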


@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&raead, 0, sizeof(raead));
+
 	strncpy(raead.type, "aead", sizeof(raead.type));
 
 	v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rcipher, 0, sizeof(rcipher));
+
 	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
 	v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rcomp, 0, sizeof(rcomp));
+
 	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
 	v32 = atomic_read(&alg->compress_cnt);
 	rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&racomp, 0, sizeof(racomp));
+
 	strlcpy(racomp.type, "acomp", sizeof(racomp.type));
 	v32 = atomic_read(&alg->compress_cnt);
 	racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rakcipher, 0, sizeof(rakcipher));
+
 	strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 	v32 = atomic_read(&alg->encrypt_cnt);
 	rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 	struct crypto_stat rkpp;
 	u32 v;
 
+	memset(&rkpp, 0, sizeof(rkpp));
+
 	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
 	v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rhash, 0, sizeof(rhash));
+
 	strncpy(rhash.type, "ahash", sizeof(rhash.type));
 
 	v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rhash, 0, sizeof(rhash));
+
 	strncpy(rhash.type, "shash", sizeof(rhash.type));
 
 	v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rrng, 0, sizeof(rrng));
+
 	strncpy(rrng.type, "rng", sizeof(rrng.type));
 
 	v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
 				 struct crypto_user_alg *ualg,
 				 struct sk_buff *skb)
 {
+	memset(ualg, 0, sizeof(*ualg));
+
 	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
 	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
 		sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
 	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
 		struct crypto_stat rl;
 
+		memset(&rl, 0, sizeof(rl));
 		strlcpy(rl.type, "larval", sizeof(rl.type));
 		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
 			    sizeof(struct crypto_stat), &rl))
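
String copies alone would still not be enough for structs like these: they also hold integer counters, and the compiler may insert padding between members that no field assignment ever touches, so zeroing the whole object first is the only way to guarantee nothing stale is serialized. A sketch of the pattern (the layout below is illustrative, not the real crypto_stat):

    #include <string.h>

    struct stat_report {
    	char type[64];
    	unsigned int cnt;	/* padding may follow here on 64-bit ABIs */
    	unsigned long long tlen;
    };

    static void fill_report(struct stat_report *st)
    {
    	/* clear string tails, integer fields and padding in one go */
    	memset(st, 0, sizeof(*st));

    	strncpy(st->type, "rng", sizeof(st->type));
    	st->cnt = 1;
    	st->tlen = 4096;
    }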


@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
 	ctx->cryptd_tfm = cryptd_tfm;
 
-	reqsize = sizeof(struct skcipher_request);
-	reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
+	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
+	reqsize += sizeof(struct skcipher_request);
 
 	crypto_skcipher_set_reqsize(tfm, reqsize);


@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
 config XPOWER_PMIC_OPREGION
 	bool "ACPI operation region support for XPower AXP288 PMIC"
-	depends on MFD_AXP20X_I2C && IOSF_MBI
+	depends on MFD_AXP20X_I2C && IOSF_MBI=y
 	help
 	  This config adds ACPI operation region support for XPower AXP288 PMIC.


@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
 		return rc;
 
 	if (ars_status_process_records(acpi_desc))
-		return -ENOMEM;
-	return 0;
+		dev_err(acpi_desc->dev, "Failed to process ARS records\n");
+
+	return rc;
 }
 
 static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd)
 {
 	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
-	struct nfit_spa *nfit_spa;
-	int rc = 0;
 
 	if (nvdimm)
 		return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 	 * just needs guarantees that any ARS it initiates are not
 	 * interrupted by any intervening start requests from userspace.
 	 */
-	mutex_lock(&acpi_desc->init_mutex);
-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-		if (acpi_desc->scrub_spa
-				|| test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
-				|| test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
-			rc = -EBUSY;
-			break;
-		}
-	mutex_unlock(&acpi_desc->init_mutex);
+	if (work_busy(&acpi_desc->dwork.work))
+		return -EBUSY;
 
-	return rc;
+	return 0;
 }
 
 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,


@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* These specific Samsung models/firmware-revs do not handle LPM well */
 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
 	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
-	{ "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
 
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500IT_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |


@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
 	bio.bi_end_io = floppy_rb0_cb;
 	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
+	init_completion(&cbdata.complete);
+
 	submit_bio(&bio);
 	process_fd_request();
 
-	init_completion(&cbdata.complete);
-
 	wait_for_completion(&cbdata.complete);
 
 	__free_page(page);
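
The two-line move above closes a real race: the bio's end_io callback can run and signal the completion as soon as submit_bio() returns, so initializing the completion afterwards could reset a completion that had already fired and leave the waiter stuck forever. A userspace analogue of the correct ordering, using a pthread condition variable in place of struct completion (all names illustrative):

    #include <pthread.h>

    struct completion {
    	pthread_mutex_t lock;
    	pthread_cond_t  cond;
    	int             done;
    };

    static void *io_done(void *arg)	/* plays the bio end_io callback */
    {
    	struct completion *c = arg;

    	pthread_mutex_lock(&c->lock);
    	c->done = 1;
    	pthread_cond_signal(&c->cond);
    	pthread_mutex_unlock(&c->lock);
    	return NULL;
    }

    int main(void)
    {
    	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
    				PTHREAD_COND_INITIALIZER, 0 };
    	pthread_t io;

    	/* c is fully initialized *before* the async work starts;
    	 * initializing after pthread_create() could wipe out a
    	 * completion that has already fired */
    	pthread_create(&io, NULL, io_done, &c);

    	pthread_mutex_lock(&c.lock);
    	while (!c.done)
    		pthread_cond_wait(&c.cond, &c.lock);
    	pthread_mutex_unlock(&c.lock);

    	return pthread_join(io, NULL);
    }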


@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	/* Ensure the arm clock divider is what we expect */
 	ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
 	if (ret) {
+		int ret1;
+
 		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-		regulator_set_voltage_tol(arm_reg, volt_old, 0);
+		ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+		if (ret1)
+			dev_warn(cpu_dev,
+				 "failed to restore vddarm voltage: %d\n", ret1);
 		return ret;
 	}


@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
 {
 	int ret;
 	struct cpuidle_driver *drv;
-	struct cpuidle_device *dev;
 
 	drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
 		goto out_kfree_drv;
 	}
 
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		if (ret != -EBUSY)
-			pr_err("Failed to register cpuidle driver\n");
-		goto out_kfree_drv;
-	}
-
 	/*
 	 * Call arch CPU operations in order to initialize
 	 * idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
 	ret = arm_cpuidle_init(cpu);
 
 	/*
-	 * Skip the cpuidle device initialization if the reported
+	 * Allow the initialization to continue for other CPUs, if the reported
 	 * failure is a HW misconfiguration/breakage (-ENXIO).
 	 */
-	if (ret == -ENXIO)
-		return 0;
-
 	if (ret) {
 		pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-		goto out_unregister_drv;
+		ret = ret == -ENXIO ? 0 : ret;
+		goto out_kfree_drv;
 	}
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev) {
-		ret = -ENOMEM;
-		goto out_unregister_drv;
-	}
-	dev->cpu = cpu;
-
-	ret = cpuidle_register_device(dev);
-	if (ret) {
-		pr_err("Failed to register cpuidle device for CPU %d\n",
-		       cpu);
-		goto out_kfree_dev;
-	}
+	ret = cpuidle_register(drv, NULL);
+	if (ret)
+		goto out_kfree_drv;
 
 	return 0;
 
-out_kfree_dev:
-	kfree(dev);
-out_unregister_drv:
-	cpuidle_unregister_driver(drv);
 out_kfree_drv:
 	kfree(drv);
 	return ret;
@@ -178,9 +154,7 @@ out_fail:
 	while (--cpu >= 0) {
 		dev = per_cpu(cpuidle_devices, cpu);
 		drv = cpuidle_get_cpu_driver(dev);
-		cpuidle_unregister_device(dev);
-		cpuidle_unregister_driver(drv);
-		kfree(dev);
+		cpuidle_unregister(drv);
 		kfree(drv);
 	}


@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	int *splits_in_nents;
 	int *splits_out_nents = NULL;
 	struct sec_request_el *el, *temp;
+	bool split = skreq->src != skreq->dst;
 
 	mutex_init(&sec_req->lock);
 	sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	if (ret)
 		goto err_free_split_sizes;
 
-	if (skreq->src != skreq->dst) {
+	if (split) {
 		sec_req->len_out = sg_nents(skreq->dst);
 		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
 					   &splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 					       split_sizes[i],
 					       skreq->src != skreq->dst,
 					       splits_in[i], splits_in_nents[i],
-					       splits_out[i],
-					       splits_out_nents[i], info);
+					       split ? splits_out[i] : NULL,
+					       split ? splits_out_nents[i] : 0,
+					       info);
 		if (IS_ERR(el)) {
 			ret = PTR_ERR(el);
 			goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	 * more refined but this is unlikely to happen so no need.
 	 */
 
-	/* Cleanup - all elements in pointer arrays have been coppied */
-	kfree(splits_in_nents);
-	kfree(splits_in);
-	kfree(splits_out_nents);
-	kfree(splits_out);
-	kfree(split_sizes);
-
 	/* Grab a big lock for a long time to avoid concurrency issues */
 	mutex_lock(&queue->queuelock);
 
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	    (!queue->havesoftqueue ||
 	     kfifo_avail(&queue->softqueue) > steps)) ||
 	    !list_empty(&ctx->backlog)) {
+		ret = -EBUSY;
 		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
 			mutex_unlock(&queue->queuelock);
-			return -EBUSY;
+			goto out;
 		}
 
-		ret = -EBUSY;
 		mutex_unlock(&queue->queuelock);
 		goto err_free_elements;
 	}
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	if (ret)
 		goto err_free_elements;
 
-	return -EINPROGRESS;
+	ret = -EINPROGRESS;
+out:
+	/* Cleanup - all elements in pointer arrays have been copied */
+	kfree(splits_in_nents);
+	kfree(splits_in);
+	kfree(splits_out_nents);
+	kfree(splits_out);
+	kfree(split_sizes);
+
+	return ret;
 
 err_free_elements:
 	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
 				 crypto_skcipher_ivsize(atfm),
 				 DMA_BIDIRECTIONAL);
 err_unmap_out_sg:
-	if (skreq->src != skreq->dst)
+	if (split)
 		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
 				    splits_out_nents, sec_req->len_out,
 				    info->dev);


@@ -265,6 +265,10 @@ void __init efi_init(void)
 			      (params.mmap & ~PAGE_MASK)));
 
 	init_screen_info();
+
+	/* ARM does not permit early mappings to persist across paging_init() */
+	if (IS_ENABLED(CONFIG_ARM))
+		efi_memmap_unmap();
 }
 
 static int __init register_gop_device(void)


@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
 {
 	u64 mapsize;
 
-	if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
 		return 0;
 	}


@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 		early_memunmap(tbl, sizeof(*tbl));
 	}
+	return 0;
+}
+
+int __init efi_apply_persistent_mem_reservations(void)
+{
 	if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
 		unsigned long prsv = efi.mem_reserve;
@@ -963,37 +967,44 @@ bool efi_is_table_address(unsigned long phys_addr)
 }
 
 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
 
 int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 {
-	struct linux_efi_memreserve *rsv, *parent;
+	struct linux_efi_memreserve *rsv;
 
-	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+	if (!efi_memreserve_root)
 		return -ENODEV;
 
-	rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+	rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
 	if (!rsv)
 		return -ENOMEM;
 
-	parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
-	if (!parent) {
-		kfree(rsv);
-		return -ENOMEM;
-	}
-
 	rsv->base = addr;
 	rsv->size = size;
 
 	spin_lock(&efi_mem_reserve_persistent_lock);
-	rsv->next = parent->next;
-	parent->next = __pa(rsv);
+	rsv->next = efi_memreserve_root->next;
+	efi_memreserve_root->next = __pa(rsv);
 	spin_unlock(&efi_mem_reserve_persistent_lock);
 
-	memunmap(parent);
-
 	return 0;
 }
 
+static int __init efi_memreserve_root_init(void)
+{
+	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+		return -ENODEV;
+
+	efi_memreserve_root = memremap(efi.mem_reserve,
+				       sizeof(*efi_memreserve_root),
+				       MEMREMAP_WB);
+	if (!efi_memreserve_root)
+		return -ENOMEM;
+
+	return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
 				  unsigned long code, void *unused)
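
The reshape above is the map-once idiom: the table is memremap()ed a single time from an early_initcall and cached in efi_memreserve_root, so the insert path does no mapping work at all and can safely run with GFP_ATOMIC. A reduced sketch of the same split, with invented names and plain allocation standing in for the mapping:

    #include <stdlib.h>

    struct memreserve { struct memreserve *next; unsigned long base, size; };

    /* resolved once at startup; NULL means the table does not exist */
    static struct memreserve *reserve_root;

    static int reserve_root_init(void)	/* the early_initcall analogue */
    {
    	reserve_root = calloc(1, sizeof(*reserve_root)); /* "memremap" */
    	return reserve_root ? 0 : -1;
    }

    /* hot path: a plain list insert, no map/unmap per call */
    static int reserve_persistent(unsigned long base, unsigned long size)
    {
    	struct memreserve *rsv;

    	if (!reserve_root)
    		return -1;

    	rsv = malloc(sizeof(*rsv));
    	if (!rsv)
    		return -1;

    	rsv->base = base;
    	rsv->size = size;
    	rsv->next = reserve_root->next;
    	reserve_root->next = rsv;
    	return 0;
    }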


@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
 	efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
 	efi_status_t status;
 
+	if (IS_ENABLED(CONFIG_ARM))
+		return;
+
 	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
 				(void **)&rsv);
 	if (status != EFI_SUCCESS) {


@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
 				return efi_status;
 			}
 		}
+
+	/* shrink the FDT back to its minimum size */
+	fdt_pack(fdt);
+
 	return EFI_SUCCESS;
 
 fdt_set_fail:


@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
 void __init efi_memmap_unmap(void)
 {
+	if (!efi_enabled(EFI_MEMMAP))
+		return;
+
 	if (!efi.memmap.late) {
 		unsigned long size;


@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
 	}								\
 									\
 	init_completion(&efi_rts_work.efi_rts_comp);			\
-	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	INIT_WORK(&efi_rts_work.work, efi_call_rts);			\
 	efi_rts_work.arg1 = _arg1;					\
 	efi_rts_work.arg2 = _arg2;					\
 	efi_rts_work.arg3 = _arg3;					\


@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			continue;
 		}
 
-		/* First check if the entry is already handled */
-		if (cursor.pfn < frag_start) {
-			cursor.entry->huge = true;
-			amdgpu_vm_pt_next(adev, &cursor);
-			continue;
-		}
-
 		/* If it isn't already handled it can't be a huge page */
 		if (cursor.entry->huge) {
 			/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			}
 		} while (frag_start < entry_end);
 
-		if (frag >= shift)
+		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+			/* Mark all child entries as huge */
+			while (cursor.pfn < frag_start) {
+				cursor.entry->huge = true;
+				amdgpu_vm_pt_next(adev, &cursor);
+			}
+		} else if (frag >= shift) {
+			/* or just move on to the next on the same level. */
 			amdgpu_vm_pt_next(adev, &cursor);
+		}
 	}
 
 	return 0;


@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
 	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
 		/*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 		 * to get rid of the VM fault and hardware hang.
 		 */
 		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max((adev->gmc.vram_end >> 18) + 0x1,
+			     max((adev->gmc.fb_end >> 18) + 0x1,
 				 adev->gmc.agp_end >> 18));
 	else
 		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start


@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
 	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
 		/*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 		 * to get rid of the VM fault and hardware hang.
 		 */
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max((adev->gmc.vram_end >> 18) + 0x1,
+			     max((adev->gmc.fb_end >> 18) + 0x1,
 				 adev->gmc.agp_end >> 18));
 	else
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +


@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
 	else
 		wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+	WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
 
 	/* set rptr, wptr to 0 */
 	WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);


@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
 		table->WatermarkRow[1][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[1][i].WmSetting = (uint8_t)
 			wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
 	}
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
 	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
 		table->WatermarkRow[0][i].MinClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MaxClock =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MinUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].MaxUclk =
 			cpu_to_le16((uint16_t)
-			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-			1000);
+			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+			1000));
 		table->WatermarkRow[0][i].WmSetting = (uint8_t)
 			wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 	}


@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 	mutex_lock(&mgr->lock);
 	mstb = mgr->mst_primary;
+	if (!mstb)
+		goto out;
+
 	for (i = 0; i < lct - 1; i++) {
 		int shift = (i % 2) ? 0 : 4;
 		int port_num = (rad[i / 2] >> shift) & 0xf;


@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
 /**
  * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
+ * @dev: DRM device
  * @bpp: bits per pixels
  * @depth: bit depth per pixel
- * @native: use host native byte order
  *
  * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
  * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,


@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
 			u8 eu_disabled_mask;
 			u32 n_disabled;
 
-			if (!(sseu->subslice_mask[ss] & BIT(ss)))
+			if (!(sseu->subslice_mask[s] & BIT(ss)))
 				/* skip disabled subslice */
 				continue;


@@ -4850,8 +4850,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
 	 * chroma samples for both of the luma samples, and thus we don't
 	 * actually get the expected MPEG2 chroma siting convention :(
 	 * The same behaviour is observed on pre-SKL platforms as well.
+	 *
+	 * Theory behind the formula (note that we ignore sub-pixel
+	 * source coordinates):
+	 * s = source sample position
+	 * d = destination sample position
+	 *
+	 * Downscaling 4:1:
+	 * -0.5
+	 * | 0.0
+	 * | |     1.5 (initial phase)
+	 * | |     |
+	 * v v     v
+	 * | s | s | s | s |
+	 * |       d       |
+	 *
+	 * Upscaling 1:4:
+	 * -0.5
+	 * | -0.375 (initial phase)
+	 * | |     0.0
+	 * | |     |
+	 * v v     v
+	 * |       s       |
+	 * | d | d | d | d |
 	 */
-u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
 {
 	int phase = -0x8000;
 	u16 trip = 0;
@@ -4859,6 +4882,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
 	if (chroma_cosited)
 		phase += (sub - 1) * 0x8000 / sub;
 
+	phase += scale / (2 * sub);
+
+	/*
+	 * Hardware initial phase limited to [-0.5:1.5].
+	 * Since the max hardware scale factor is 3.0, we
+	 * should never actually excdeed 1.0 here.
+	 */
+	WARN_ON(phase < -0x8000 || phase > 0x18000);
+
 	if (phase < 0)
 		phase = 0x10000 + phase;
 	else
@@ -5067,13 +5099,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 	if (crtc->config->pch_pfit.enabled) {
 		u16 uv_rgb_hphase, uv_rgb_vphase;
+		int pfit_w, pfit_h, hscale, vscale;
 		int id;
 
 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
 			return;
 
-		uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-		uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+		pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
+		pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+
+		hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
+		vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+
+		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
 		id = scaler_state->scaler_id;
 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
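
As a quick sanity check of the formula (my own arithmetic, not from the patch): scale is a 16.16 fixed-point source/destination ratio, so 1:1 is 0x10000. For a non-cosited single-sample plane (sub = 1), phase = -0x8000 + 0x10000 / 2 = 0, i.e. no initial offset when nothing is scaled, and the 4:1 and 1:4 cases land exactly on the 1.5 and -0.375 values drawn in the comment above. The same arithmetic in a few lines of C:

    #include <assert.h>

    /* 16.16 fixed point, mirroring the kernel helper's conventions */
    static int calc_phase(int sub, int scale, int chroma_cosited)
    {
    	int phase = -0x8000;

    	if (chroma_cosited)
    		phase += (sub - 1) * 0x8000 / sub;

    	phase += scale / (2 * sub);
    	return phase;
    }

    int main(void)
    {
    	assert(calc_phase(1, 0x10000, 0) == 0);        /* 1:1 -> no offset */
    	assert(calc_phase(1, 0x40000, 0) == 0x18000);  /* 4:1 -> +1.5 */
    	assert(calc_phase(1, 0x04000, 0) == -0x6000);  /* 1:4 -> -0.375 */
    	return 0;
    }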


@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	if (!intel_connector)
 		return NULL;
 
+	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+	intel_connector->mst_port = intel_dp;
+	intel_connector->port = port;
+
 	connector = &intel_connector->base;
 	ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
 				 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
 	drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
-	intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
-	intel_connector->mst_port = intel_dp;
-	intel_connector->port = port;
-
 	for_each_pipe(dev_priv, pipe) {
 		struct drm_encoder *enc =
 			&intel_dp->mst_encoders[pipe]->base.base;


@@ -1646,7 +1646,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
 				  struct intel_crtc_state *crtc_state);
 
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(const struct intel_crtc_state *crtc_state,
 		  u32 pixel_format);


@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 	drm_for_each_connector_iter(connector, &conn_iter) {
 		struct intel_connector *intel_connector = to_intel_connector(connector);
 
-		if (intel_connector->encoder->hpd_pin == pin) {
+		/* Don't check MST ports, they don't have pins */
+		if (!intel_connector->mst_port &&
+		    intel_connector->encoder->hpd_pin == pin) {
 			if (connector->polled != intel_connector->polled)
 				DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
 						 connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 	struct intel_encoder *encoder;
 	bool storm_detected = false;
 	bool queue_dig = false, queue_hp = false;
+	u32 long_hpd_pulse_mask = 0;
+	u32 short_hpd_pulse_mask = 0;
+	enum hpd_pin pin;
 
 	if (!pin_mask)
 		return;
 
 	spin_lock(&dev_priv->irq_lock);
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		enum hpd_pin pin = encoder->hpd_pin;
-		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+
+	/*
+	 * Determine whether ->hpd_pulse() exists for each pin, and
+	 * whether we have a short or a long pulse. This is needed
+	 * as each pin may have up to two encoders (HDMI and DP) and
+	 * only the one of them (DP) will have ->hpd_pulse().
+	 */
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+		enum port port = encoder->port;
+		bool long_hpd;
 
+		pin = encoder->hpd_pin;
 		if (!(BIT(pin) & pin_mask))
 			continue;
 
-		if (has_hpd_pulse) {
-			bool long_hpd = long_mask & BIT(pin);
-			enum port port = encoder->port;
-
-			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
-					 long_hpd ? "long" : "short");
-			/*
-			 * For long HPD pulses we want to have the digital queue happen,
-			 * but we still want HPD storm detection to function.
-			 */
-			queue_dig = true;
-			if (long_hpd) {
-				dev_priv->hotplug.long_port_mask |= (1 << port);
-			} else {
-				/* for short HPD just trigger the digital queue */
-				dev_priv->hotplug.short_port_mask |= (1 << port);
-				continue;
-			}
+		if (!has_hpd_pulse)
+			continue;
+
+		long_hpd = long_mask & BIT(pin);
+
+		DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+				 long_hpd ? "long" : "short");
+		queue_dig = true;
+
+		if (long_hpd) {
+			long_hpd_pulse_mask |= BIT(pin);
+			dev_priv->hotplug.long_port_mask |= BIT(port);
+		} else {
+			short_hpd_pulse_mask |= BIT(pin);
+			dev_priv->hotplug.short_port_mask |= BIT(port);
 		}
+	}
+
+	/* Now process each pin just once */
+	for_each_hpd_pin(pin) {
+		bool long_hpd;
+
+		if (!(BIT(pin) & pin_mask))
+			continue;
 
 		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
 			/*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
 			continue;
 
-		if (!has_hpd_pulse) {
+		/*
+		 * Delegate to ->hpd_pulse() if one of the encoders for this
+		 * pin has it, otherwise let the hotplug_work deal with this
+		 * pin directly.
+		 */
+		if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+			long_hpd = long_hpd_pulse_mask & BIT(pin);
+		} else {
 			dev_priv->hotplug.event_bits |= BIT(pin);
+			long_hpd = true;
 			queue_hp = true;
 		}
 
+		if (!long_hpd)
+			continue;
+
 		if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
 			dev_priv->hotplug.event_bits &= ~BIT(pin);
 			storm_detected = true;


@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
 	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
-	/* True 32b PPGTT with dynamic page allocation: update PDP
+	/*
+	 * True 32b PPGTT with dynamic page allocation: update PDP
 	 * registers and point the unallocated PDPs to scratch page.
 	 * PML4 is allocated during ppgtt init, so this is not needed
 	 * in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
 	if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
 		execlists_update_context_pdps(ppgtt, reg_state);
 
+	/*
+	 * Make sure the context image is complete before we submit it to HW.
+	 *
+	 * Ostensibly, writes (including the WCB) should be flushed prior to
+	 * an uncached write such as our mmio register access, the empirical
+	 * evidence (esp. on Braswell) suggests that the WC write into memory
+	 * may not be visible to the HW prior to the completion of the UC
+	 * register write and that we may begin execution from the context
+	 * before its image is complete leading to invalid PD chasing.
+	 */
+	wmb();
+
 	return ce->lrc_desc;
 }


@@ -91,6 +91,7 @@ static int
 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 {
 	u32 cmd, *cs;
+	int i;
 
 	/*
 	 * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	cs = intel_ring_begin(rq, 2);
+	i = 2;
+	if (mode & EMIT_INVALIDATE)
+		i += 20;
+
+	cs = intel_ring_begin(rq, i);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
 	*cs++ = cmd;
-	*cs++ = MI_NOOP;
+
+	/*
+	 * A random delay to let the CS invalidate take effect? Without this
+	 * delay, the GPU relocation path fails as the CS does not see
+	 * the updated contents. Just as important, if we apply the flushes
+	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+	 * write and before the invalidate on the next batch), the relocations
+	 * still fail. This implies that is a delay following invalidation
+	 * that is required to reset the caches as opposed to a delay to
+	 * ensure the memory is written.
+	 */
+	if (mode & EMIT_INVALIDATE) {
+		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
+			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = 0;
+		*cs++ = 0;
+
+		for (i = 0; i < 12; i++)
+			*cs++ = MI_FLUSH;
+
+		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+		*cs++ = i915_ggtt_offset(rq->engine->scratch) |
+			PIPE_CONTROL_GLOBAL_GTT;
+		*cs++ = 0;
+		*cs++ = 0;
+	}
+
+	*cs++ = cmd;
+
 	intel_ring_advance(rq, cs);
 
 	return 0;


@@ -2748,6 +2748,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 			.hsw.has_fuses = true,
 		},
 	},
+	{
+		.name = "DC off",
+		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+		.ops = &gen9_dc_off_power_well_ops,
+		.id = DISP_PW_ID_NONE,
+	},
 	{
 		.name = "power well 2",
 		.domains = ICL_PW_2_POWER_DOMAINS,
@@ -2759,12 +2765,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
 			.hsw.has_fuses = true,
 		},
 	},
-	{
-		.name = "DC off",
-		.domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-		.ops = &gen9_dc_off_power_well_ops,
-		.id = DISP_PW_ID_NONE,
-	},
 	{
 		.name = "power well 3",
 		.domains = ICL_PW_3_POWER_DOMAINS,
@@ -3176,8 +3176,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 			    u8 req_slices)
 {
-	u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-	u32 val;
+	const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
 	bool ret;
 
 	if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3187,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	if (req_slices == hw_enabled_slices || req_slices == 0)
 		return;
 
-	val = I915_READ(DBUF_CTL_S2);
 	if (req_slices > hw_enabled_slices)
 		ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
 	else


@@ -302,13 +302,65 @@ skl_plane_max_stride(struct intel_plane *plane,
 	return min(8192 * cpp, 32768);
 }
 
+static void
+skl_program_scaler(struct intel_plane *plane,
+		   const struct intel_crtc_state *crtc_state,
+		   const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+	int scaler_id = plane_state->scaler_id;
+	const struct intel_scaler *scaler =
+		&crtc_state->scaler_state.scalers[scaler_id];
+	int crtc_x = plane_state->base.dst.x1;
+	int crtc_y = plane_state->base.dst.y1;
+	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+	u16 y_hphase, uv_rgb_hphase;
+	u16 y_vphase, uv_rgb_vphase;
+	int hscale, vscale;
+
+	hscale = drm_rect_calc_hscale(&plane_state->base.src,
+				      &plane_state->base.dst,
+				      0, INT_MAX);
+	vscale = drm_rect_calc_vscale(&plane_state->base.src,
+				      &plane_state->base.dst,
+				      0, INT_MAX);
+
+	/* TODO: handle sub-pixel coordinates */
+	if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+		y_hphase = skl_scaler_calc_phase(1, hscale, false);
+		y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+		/* MPEG2 chroma siting convention */
+		uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+		uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+	} else {
+		/* not used */
+		y_hphase = 0;
+		y_vphase = 0;
+
+		uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+		uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+	}
+
+	I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+		      PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+	I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+	I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+		      PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+	I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+		      PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+	I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+	I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
+
 void
 skl_update_plane(struct intel_plane *plane,
 		 const struct intel_crtc_state *crtc_state,
 		 const struct intel_plane_state *plane_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-	const struct drm_framebuffer *fb = plane_state->base.fb;
 	enum plane_id plane_id = plane->id;
 	enum pipe pipe = plane->pipe;
 	u32 plane_ctl = plane_state->ctl;
@@ -318,8 +370,6 @@ skl_update_plane(struct intel_plane *plane,
 	u32 aux_stride = skl_plane_stride(plane_state, 1);
 	int crtc_x = plane_state->base.dst.x1;
 	int crtc_y = plane_state->base.dst.y1;
-	uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-	uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
 	uint32_t x = plane_state->color_plane[0].x;
 	uint32_t y = plane_state->color_plane[0].y;
 	uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -329,8 +379,6 @@ skl_update_plane(struct intel_plane *plane,
 	/* Sizes are 0 based */
 	src_w--;
 	src_h--;
-	crtc_w--;
-	crtc_h--;
 
 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
@@ -353,41 +401,8 @@ skl_update_plane(struct intel_plane *plane,
 			      (plane_state->color_plane[1].y << 16) |
 			      plane_state->color_plane[1].x);
 
-	/* program plane scaler */
 	if (plane_state->scaler_id >= 0) {
-		int scaler_id = plane_state->scaler_id;
-		const struct intel_scaler *scaler =
-			&crtc_state->scaler_state.scalers[scaler_id];
-		u16 y_hphase, uv_rgb_hphase;
-		u16 y_vphase, uv_rgb_vphase;
-
-		/* TODO: handle sub-pixel coordinates */
-		if (fb->format->format == DRM_FORMAT_NV12) {
-			y_hphase = skl_scaler_calc_phase(1, false);
-			y_vphase = skl_scaler_calc_phase(1, false);
-
-			/* MPEG2 chroma siting convention */
-			uv_rgb_hphase = skl_scaler_calc_phase(2, true);
-			uv_rgb_vphase = skl_scaler_calc_phase(2, false);
-		} else {
-			/* not used */
-			y_hphase = 0;
-			y_vphase = 0;
-
-			uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-			uv_rgb_vphase = skl_scaler_calc_phase(1, false);
-		}
-
-		I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
-			      PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
-		I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-		I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
-			      PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-		I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
-			      PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-		I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-		I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
-			      ((crtc_w + 1) << 16)|(crtc_h + 1));
+		skl_program_scaler(plane, crtc_state, plane_state);
 
 		I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
 	} else {


@@ -854,6 +854,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
 	unsigned int sof_lines;
 	unsigned int vsync_lines;
 
+	/* Use VENCI for 480i and 576i and double HDMI pixels */
+	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+		hdmi_repeat = true;
+		use_enci = true;
+		venc_hdmi_latency = 1;
+	}
+
 	if (meson_venc_hdmi_supported_vic(vic)) {
 		vmode = meson_venc_hdmi_get_vic_vmode(vic);
 		if (!vmode) {
@@ -865,13 +872,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
 	} else {
 		meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
 		vmode = &vmode_dmt;
-	}
-
-	/* Use VENCI for 480i and 576i and double HDMI pixels */
-	if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
-		hdmi_repeat = true;
-		use_enci = true;
-		venc_hdmi_latency = 1;
+		use_enci = false;
 	}
 
 	/* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */


@@ -5409,11 +5409,14 @@ static int dsi_probe(struct platform_device *pdev)
 	/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
 	 * of data to 3 by default */
-	if (dsi->data->quirks & DSI_QUIRK_GNQ)
+	if (dsi->data->quirks & DSI_QUIRK_GNQ) {
+		dsi_runtime_get(dsi);
 		/* NB_DATA_LANES */
 		dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
-	else
+		dsi_runtime_put(dsi);
+	} else {
 		dsi->num_lanes_supported = 3;
+	}
 
 	r = dsi_init_output(dsi);
 	if (r)
@@ -5426,15 +5429,19 @@ static int dsi_probe(struct platform_device *pdev)
 	}
 
 	r = of_platform_populate(dev->of_node, NULL, NULL, dev);
-	if (r)
+	if (r) {
 		DSSERR("Failed to populate DSI child devices: %d\n", r);
+		goto err_uninit_output;
+	}
 
 	r = component_add(&pdev->dev, &dsi_component_ops);
 	if (r)
-		goto err_uninit_output;
+		goto err_of_depopulate;
 
 	return 0;
 
+err_of_depopulate:
+	of_platform_depopulate(dev);
 err_uninit_output:
 	dsi_uninit_output(dsi);
 err_pm_disable:
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
 	/* wait for current handler to finish before turning the DSI off */
 	synchronize_irq(dsi->irq);
 
-	dispc_runtime_put(dsi->dss->dispc);
-
 	return 0;
 }
 
 static int dsi_runtime_resume(struct device *dev)
 {
 	struct dsi_data *dsi = dev_get_drvdata(dev);
-	int r;
-
-	r = dispc_runtime_get(dsi->dss->dispc);
-	if (r)
-		return r;
 
 	dsi->is_enabled = true;
 	/* ensure the irq handler sees the is_enabled value */


@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
 			       dss);
 
 	/* Add all the child devices as components. */
+	r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (r)
+		goto err_uninit_debugfs;
+
 	omapdss_gather_components(&pdev->dev);
 	device_for_each_child(&pdev->dev, &match, dss_add_child_component);
 
 	r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
 	if (r)
-		goto err_uninit_debugfs;
+		goto err_of_depopulate;
 
 	return 0;
 
+err_of_depopulate:
+	of_platform_depopulate(&pdev->dev);
+
 err_uninit_debugfs:
 	dss_debugfs_remove_file(dss->debugfs.clk);
 	dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
 {
 	struct dss_device *dss = platform_get_drvdata(pdev);
 
+	of_platform_depopulate(&pdev->dev);
+
 	component_master_del(&pdev->dev, &dss_component_ops);
 
 	dss_debugfs_remove_file(dss->debugfs.clk);


@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
 	hdmi->dss = dss;
 
-	r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+	r = hdmi_runtime_get(hdmi);
 	if (r)
 		return r;
 
+	r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+	if (r)
+		goto err_runtime_put;
+
 	r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
 	if (r)
 		goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
 	hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
 						hdmi);
 
+	hdmi_runtime_put(hdmi);
+
 	return 0;
 
 err_cec_uninit:
 	hdmi4_cec_uninit(&hdmi->core);
 err_pll_uninit:
 	hdmi_pll_uninit(&hdmi->pll);
+err_runtime_put:
+	hdmi_runtime_put(hdmi);
 	return r;
 }
 
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-	struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-	dispc_runtime_put(hdmi->dss->dispc);
-
-	return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-	struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-	int r;
-
-	r = dispc_runtime_get(hdmi->dss->dispc);
-	if (r < 0)
-		return r;
-
-	return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-	.runtime_suspend = hdmi_runtime_suspend,
-	.runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
 	{ .compatible = "ti,omap4-hdmi", },
 	{},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
 	.remove = hdmi4_remove,
 	.driver = {
 		.name = "omapdss_hdmi",
-		.pm = &hdmi_pm_ops,
 		.of_match_table = hdmi_of_match,
 		.suppress_bind_attrs = true,
 	},

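The hdmi4_bind() change holds a runtime PM reference across the bind-time hardware setup and drops it on every exit path, which is why the per-driver hdmi_pm_ops below it could go away. A sketch of that bracket using the generic runtime PM API (hdmi_runtime_get/put are thin wrappers around the same idea; bar_ names are placeholders):

	#include <linux/pm_runtime.h>

	static int bar_hw_init(struct device *dev)
	{
		return 0;	/* placeholder for PLL/CEC/... init */
	}

	static int bar_bind(struct device *dev)
	{
		int r;

		r = pm_runtime_get_sync(dev);	/* power the block up */
		if (r < 0) {
			pm_runtime_put_noidle(dev);
			return r;
		}

		r = bar_hw_init(dev);		/* safe: device is active */

		pm_runtime_put(dev);		/* drop the ref either way */
		return r;
	}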

@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-	struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-	dispc_runtime_put(hdmi->dss->dispc);
-
-	return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-	struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-	int r;
-
-	r = dispc_runtime_get(hdmi->dss->dispc);
-	if (r < 0)
-		return r;
-
-	return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-	.runtime_suspend = hdmi_runtime_suspend,
-	.runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
 	{ .compatible = "ti,omap5-hdmi", },
 	{ .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
 	.remove = hdmi5_remove,
 	.driver = {
 		.name = "omapdss_hdmi5",
-		.pm = &hdmi_pm_ops,
 		.of_match_table = hdmi_of_match,
 		.suppress_bind_attrs = true,
 	},


@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
 	if (venc->tv_dac_clk)
 		clk_disable_unprepare(venc->tv_dac_clk);
 
-	dispc_runtime_put(venc->dss->dispc);
-
 	return 0;
 }
 
 static int venc_runtime_resume(struct device *dev)
 {
 	struct venc_device *venc = dev_get_drvdata(dev);
-	int r;
-
-	r = dispc_runtime_get(venc->dss->dispc);
-	if (r < 0)
-		return r;
 
 	if (venc->tv_dac_clk)
 		clk_prepare_enable(venc->tv_dac_clk);


@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
 static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 				    struct drm_crtc_state *old_state)
 {
+	struct omap_drm_private *priv = crtc->dev->dev_private;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 	int ret;
 
 	DBG("%s", omap_crtc->name);
 
+	priv->dispc_ops->runtime_get(priv->dispc);
+
 	spin_lock_irq(&crtc->dev->event_lock);
 	drm_crtc_vblank_on(crtc);
 	ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
 				     struct drm_crtc_state *old_state)
 {
+	struct omap_drm_private *priv = crtc->dev->dev_private;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
 	DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
 	spin_unlock_irq(&crtc->dev->event_lock);
 
 	drm_crtc_vblank_off(crtc);
+
+	priv->dispc_ops->runtime_put(priv->dispc);
 }
 
 static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,

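Taken together with the dsi/hdmi/venc hunks above, this moves the dispc runtime PM reference to the one place that actually brackets scanout: the CRTC atomic enable/disable hooks. A reduced sketch of the pairing (foo_ types are placeholders for the omap_drm_private plumbing):

	struct foo_dispc_ops {
		int (*runtime_get)(void *dispc);
		void (*runtime_put)(void *dispc);
	};

	struct foo_priv {
		const struct foo_dispc_ops *ops;
		void *dispc;
	};

	static void foo_crtc_atomic_enable(struct foo_priv *priv)
	{
		/* power dispc up before the first register access */
		priv->ops->runtime_get(priv->dispc);
		/* ... drm_crtc_vblank_on(), arm the event, ... */
	}

	static void foo_crtc_atomic_disable(struct foo_priv *priv)
	{
		/* ... drm_crtc_vblank_off(), flush pending events, ... */
		priv->ops->runtime_put(priv->dispc);
	}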

@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(can_put_echo_skb);
 
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+	struct can_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb = priv->echo_skb[idx];
+	struct canfd_frame *cf;
+
+	if (idx >= priv->echo_skb_max) {
+		netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+			   __func__, idx, priv->echo_skb_max);
+		return NULL;
+	}
+
+	if (!skb) {
+		netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+			   __func__, idx);
+		return NULL;
+	}
+
+	/* Using "struct canfd_frame::len" for the frame
+	 * length is supported on both CAN and CANFD frames.
+	 */
+	cf = (struct canfd_frame *)skb->data;
+	*len_ptr = cf->len;
+	priv->echo_skb[idx] = NULL;
+
+	return skb;
+}
+
 /*
  * Get the skb from the stack and loop it back locally
  *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
  */
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
 {
-	struct can_priv *priv = netdev_priv(dev);
-
-	BUG_ON(idx >= priv->echo_skb_max);
-
-	if (priv->echo_skb[idx]) {
-		struct sk_buff *skb = priv->echo_skb[idx];
-		struct can_frame *cf = (struct can_frame *)skb->data;
-		u8 dlc = cf->can_dlc;
-
-		netif_rx(priv->echo_skb[idx]);
-		priv->echo_skb[idx] = NULL;
-
-		return dlc;
-	}
-
-	return 0;
+	struct sk_buff *skb;
+	u8 len;
+
+	skb = __can_get_echo_skb(dev, idx, &len);
+	if (!skb)
+		return 0;
+
+	netif_rx(skb);
+
+	return len;
 }
 EXPORT_SYMBOL_GPL(can_get_echo_skb);

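The split above lets callers fetch the echoed skb without looping it back immediately; can_get_echo_skb() keeps its old behaviour by recombining the two steps. A sketch of a caller that mirrors the rewritten function body:

	#include <linux/can/dev.h>
	#include <linux/netdevice.h>

	static unsigned int foo_tx_done(struct net_device *dev, unsigned int idx)
	{
		struct sk_buff *skb;
		u8 len;

		/* NULL if idx is out of range or no echo skb is pending */
		skb = __can_get_echo_skb(dev, idx, &len);
		if (!skb)
			return 0;

		netif_rx(skb);	/* loop the frame back locally */
		return len;	/* valid for both CAN and CAN FD frames */
	}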

@@ -135,13 +135,12 @@
 /* FLEXCAN interrupt flag register (IFLAG) bits */
 /* Errata ERR005829 step7: Reserve first valid MB */
 #define FLEXCAN_TX_MB_RESERVED_OFF_FIFO		8
-#define FLEXCAN_TX_MB_OFF_FIFO			9
 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP	0
-#define FLEXCAN_TX_MB_OFF_TIMESTAMP		1
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST	(FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST	63
-#define FLEXCAN_IFLAG_MB(x)		BIT(x)
+#define FLEXCAN_TX_MB				63
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST	(FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST	(FLEXCAN_TX_MB - 1)
+#define FLEXCAN_IFLAG_MB(x)		BIT(x & 0x1f)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE	BIT(5)
@@ -259,9 +258,7 @@ struct flexcan_priv {
 	struct can_rx_offload offload;
 
 	struct flexcan_regs __iomem *regs;
-	struct flexcan_mb __iomem *tx_mb;
 	struct flexcan_mb __iomem *tx_mb_reserved;
-	u8 tx_mb_idx;
 	u32 reg_ctrl_default;
 	u32 reg_imask1_default;
 	u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 can_id;
 	u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (cf->can_dlc > 0) {
 		data = be32_to_cpup((__be32 *)&cf->data[0]);
-		priv->write(data, &priv->tx_mb->data[0]);
+		priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
 	}
 	if (cf->can_dlc > 4) {
 		data = be32_to_cpup((__be32 *)&cf->data[4]);
-		priv->write(data, &priv->tx_mb->data[1]);
+		priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
 	}
 
 	can_put_echo_skb(skb, dev, 0);
 
-	priv->write(can_id, &priv->tx_mb->can_id);
-	priv->write(ctrl, &priv->tx_mb->can_ctrl);
+	priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
+	priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
 	/* Errata ERR005829 step8:
 	 * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
 	struct sk_buff *skb;
 	struct can_frame *cf;
 	bool rx_errors = false, tx_errors = false;
+	u32 timestamp;
+
+	timestamp = priv->read(&regs->timer) << 16;
 
 	skb = alloc_can_err_skb(dev, &cf);
 	if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 	if (tx_errors)
 		dev->stats.tx_errors++;
 
-	can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 {
 	struct flexcan_priv *priv = netdev_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
 	struct sk_buff *skb;
 	struct can_frame *cf;
 	enum can_state new_state, rx_state, tx_state;
 	int flt;
 	struct can_berr_counter bec;
+	u32 timestamp;
+
+	timestamp = priv->read(&regs->timer) << 16;
 
 	flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
 	if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 	if (unlikely(new_state == CAN_STATE_BUS_OFF))
 		can_bus_off(dev);
 
-	can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+	can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		priv->write(BIT(n - 32), &regs->iflag2);
 	} else {
 		priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-		priv->read(&regs->timer);
 	}
 
+	/* Read the Free Running Timer. It is optional but recommended
+	 * to unlock Mailbox as soon as possible and make it available
+	 * for reception.
+	 */
+	priv->read(&regs->timer);
+
 	return 1;
 }
 
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 iflag1, iflag2;
 
-	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
-	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
-		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
+		~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
 
 	return (u64)iflag2 << 32 | iflag1;
 }
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	struct flexcan_priv *priv = netdev_priv(dev);
 	struct flexcan_regs __iomem *regs = priv->regs;
 	irqreturn_t handled = IRQ_NONE;
-	u32 reg_iflag1, reg_esr;
+	u32 reg_iflag2, reg_esr;
 	enum can_state last_state = priv->can.state;
 
-	reg_iflag1 = priv->read(&regs->iflag1);
-
 	/* reception interrupt */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 				break;
 		}
 	} else {
+		u32 reg_iflag1;
+
+		reg_iflag1 = priv->read(&regs->iflag1);
 		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
 			handled = IRQ_HANDLED;
 			can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		}
 	}
 
+	reg_iflag2 = priv->read(&regs->iflag2);
+
 	/* transmission complete interrupt */
-	if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+	if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
+		u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+
 		handled = IRQ_HANDLED;
-		stats->tx_bytes += can_get_echo_skb(dev, 0);
+		stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
+							       0, reg_ctrl << 16);
 		stats->tx_packets++;
 		can_led_event(dev, CAN_LED_EVENT_TX);
 
 		/* after sending a RTR frame MB is in RX mode */
 		priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-			    &priv->tx_mb->can_ctrl);
-		priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+			    &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+		priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
 		netif_wake_queue(dev);
 	}
 
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
 		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
-		FLEXCAN_MCR_IDAM_C;
+		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
 
-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
 		reg_mcr &= ~FLEXCAN_MCR_FEN;
-		reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
-	} else {
-		reg_mcr |= FLEXCAN_MCR_FEN |
-			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
-	}
+	else
+		reg_mcr |= FLEXCAN_MCR_FEN;
 
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
 	priv->write(reg_mcr, &regs->mcr);
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
 		priv->write(reg_ctrl2, &regs->ctrl2);
 	}
 
-	/* clear and invalidate all mailboxes first */
-	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-		priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
-			    &regs->mb[i].can_ctrl);
-	}
-
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
 			priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
 				    &regs->mb[i].can_ctrl);
+		}
+	} else {
+		/* clear and invalidate unused mailboxes first */
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
+			priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+				    &regs->mb[i].can_ctrl);
+		}
 	}
 
 	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
 
 	/* mark TX mailbox as INACTIVE */
 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		    &priv->tx_mb->can_ctrl);
+		    &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
 	priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
 	priv->devtype_data = devtype_data;
 	priv->reg_xceiver = reg_xceiver;
 
-	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
 		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
-	} else {
-		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+	else
 		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
-	}
-	priv->tx_mb = &regs->mb[priv->tx_mb_idx];
 
-	priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
-	priv->reg_imask2_default = 0;
+	priv->reg_imask1_default = 0;
+	priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
 
 	priv->offload.mailbox_read = flexcan_mailbox_read;

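On the FLEXCAN_IFLAG_MB() change above: with the TX mailbox pinned to MB 63, its interrupt flag lives in the second 32-bit flag register (iflag2), so the macro masks the mailbox index down to a bit position within one register: 63 & 0x1f = 31. A standalone check of the arithmetic (plain C, not driver code):

	#include <assert.h>

	#define BIT(n)			(1u << (n))
	#define FLEXCAN_IFLAG_MB(x)	BIT((x) & 0x1f)

	int main(void)
	{
		/* MB 63 is serviced via iflag2, bit 63 - 32 = 31 */
		assert(FLEXCAN_IFLAG_MB(63) == BIT(31));
		/* a low mailbox, e.g. MB 8, would sit in iflag1, bit 8 */
		assert(FLEXCAN_IFLAG_MB(8) == BIT(8));
		return 0;
	}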

@@ -24,6 +24,9 @@
 
 #define RCAR_CAN_DRV_NAME	"rcar_can"
 
+#define RCAR_SUPPORTED_CLOCKS	(BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
+				 BIT(CLKR_CLKEXT))
+
 /* Mailbox configuration:
  * mailbox 60 - 63 - Rx FIFO mailboxes
  * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
 		goto fail_clk;
 	}
 
-	if (clock_select >= ARRAY_SIZE(clock_names)) {
+	if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
 		err = -EINVAL;
 		dev_err(&pdev->dev, "invalid CAN clock selected\n");
 		goto fail_clk;


@@ -211,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
 
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+				struct sk_buff *skb, u32 timestamp)
+{
+	struct can_rx_offload_cb *cb;
+	unsigned long flags;
+
+	if (skb_queue_len(&offload->skb_queue) >
+	    offload->skb_queue_len_max)
+		return -ENOMEM;
+
+	cb = can_rx_offload_get_cb(skb);
+	cb->timestamp = timestamp;
+
+	spin_lock_irqsave(&offload->skb_queue.lock, flags);
+	__skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+	can_rx_offload_schedule(offload);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+					 unsigned int idx, u32 timestamp)
+{
+	struct net_device *dev = offload->dev;
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	u8 len;
+	int err;
+
+	skb = __can_get_echo_skb(dev, idx, &len);
+	if (!skb)
+		return 0;
+
+	err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+	if (err) {
+		stats->rx_errors++;
+		stats->tx_fifo_errors++;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+			      struct sk_buff *skb)
 {
 	if (skb_queue_len(&offload->skb_queue) >
 	    offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
 
 static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
 {

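These two exports are what the flexcan hunks above call from hard-IRQ context: error frames and TX echoes are stamped with the controller's free-running timer and merged into the sorted rx-offload queue, so they are delivered in order with received frames. A sketch of a driver-side caller (foo_ names are placeholders):

	#include <linux/can/rx-offload.h>

	/* Called from the device IRQ handler on a TX-complete event;
	 * hw_timestamp comes from the controller's free-running timer.
	 */
	static unsigned int foo_handle_tx_done(struct can_rx_offload *offload,
					       u32 hw_timestamp)
	{
		/* pulls the echo skb via __can_get_echo_skb() and queues it
		 * sorted by hw_timestamp; returns the frame length
		 */
		return can_rx_offload_get_echo_skb(offload, 0, hw_timestamp);
	}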

@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
 {
 	struct hi3110_priv *priv = netdev_priv(net);
 	struct spi_device *spi = priv->spi;
-	unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+	unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
 	int ret;
 
 	ret = open_candev(net);

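The one-liner above switches the HI-3110 from edge- to level-triggered interrupts. The usual rationale for this kind of fix: the chip holds its INT pin asserted while any event is pending, so a level trigger re-fires for events that arrive while the threaded handler is running, where a rising edge would be lost. A sketch of the corresponding request (foo_ names are placeholders):

	#include <linux/interrupt.h>

	static irqreturn_t foo_ist(int irq, void *dev_id)
	{
		/* drain all pending device events here */
		return IRQ_HANDLED;
	}

	static int foo_request_irq(int irq, void *priv)
	{
		return request_threaded_irq(irq, NULL, foo_ist,
					    IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
					    "foo", priv);
	}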

@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 			context = &priv->tx_contexts[i];
 
 			context->echo_index = i;
-			can_put_echo_skb(skb, netdev, context->echo_index);
 			++priv->active_tx_contexts;
 			if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
 				netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 		dev_kfree_skb(skb);
 		spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-		can_free_echo_skb(netdev, context->echo_index);
 		context->echo_index = dev->max_tx_urbs;
 		--priv->active_tx_contexts;
 		netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 
 	context->priv = priv;
 
+	can_put_echo_skb(skb, netdev, context->echo_index);
+
 	usb_fill_bulk_urb(urb, dev->udev,
 			  usb_sndbulkpipe(dev->udev,
 					  dev->bulk_out->bEndpointAddress),


@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
 					new_state : CAN_STATE_ERROR_ACTIVE;
 
 		can_change_state(netdev, cf, tx_state, rx_state);
+
+		if (priv->can.restart_ms &&
+		    old_state >= CAN_STATE_BUS_OFF &&
+		    new_state < CAN_STATE_BUS_OFF)
+			cf->can_id |= CAN_ERR_RESTARTED;
 	}
 
 	if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
 
 		can_bus_off(netdev);
 	}
-
-	if (priv->can.restart_ms &&
-	    old_state >= CAN_STATE_BUS_OFF &&
-	    new_state < CAN_STATE_BUS_OFF)
-		cf->can_id |= CAN_ERR_RESTARTED;
 	}
 
 	if (!skb) {

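The move above appears to matter because alloc_can_err_skb() NULLs the frame pointer on failure; setting CAN_ERR_RESTARTED inside the branch that called can_change_state() keeps the cf dereference where the skb is known to exist. A reduced sketch of the guard (foo_ placeholder; queuing of the skb is omitted):

	#include <linux/can/dev.h>

	static void foo_report_state(struct net_device *netdev,
				     enum can_state tx_state,
				     enum can_state rx_state,
				     bool restarted)
	{
		struct can_frame *cf;
		struct sk_buff *skb;

		skb = alloc_can_err_skb(netdev, &cf);	/* cf is NULL on failure */
		if (skb) {
			can_change_state(netdev, cf, tx_state, rx_state);
			if (restarted)
				cf->can_id |= CAN_ERR_RESTARTED; /* cf valid here */
		}
		/* queuing or freeing of skb omitted in this sketch */
	}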

@@ -35,10 +35,6 @@
 #include <linux/slab.h>
 #include <linux/usb.h>
 
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
 #define UCAN_DRIVER_NAME "ucan"
 #define UCAN_MAX_RX_URBS 8
 /* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
 /* disconnect the device */
 static void ucan_disconnect(struct usb_interface *intf)
 {
-	struct usb_device *udev;
 	struct ucan_priv *up = usb_get_intfdata(intf);
 
-	udev = interface_to_usbdev(intf);
-
 	usb_set_intfdata(intf, NULL);
 
 	if (up) {


@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
 					 E1HVN_MAX)
 
+/* Following is the DMAE channel number allocation for the clients.
+ * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ * Driver: 0-3 and 8-11 (for PF dmae operations)
+ *         4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C			13 /* Channel for FW DMAE operations */
+
 /* PCIE link and speed */
 #define PCICFG_LINK_WIDTH		0x1f00000
 #define PCICFG_LINK_WIDTH_SHIFT		20


@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
 	rdata->sd_vlan_tag	= cpu_to_le16(start_params->sd_vlan_tag);
 	rdata->path_id		= BP_PATH(bp);
 	rdata->network_cos_mode	= start_params->network_cos_mode;
+	rdata->dmae_cmd_id	= BNX2X_FW_DMAE_C;
 
 	rdata->vxlan_dst_port	= cpu_to_le16(start_params->vxlan_dst_port);
 	rdata->geneve_dst_port	= cpu_to_le16(start_params->geneve_dst_port);


@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	} else {
 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
 			if (dev->features & NETIF_F_RXCSUM)
-				cpr->rx_l4_csum_errors++;
+				bnapi->cp_ring.rx_l4_csum_errors++;
 		}
 	}
 
@@ -8714,6 +8714,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
 	return rc;
 }
 
+static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
+				       u32 ring_id, u32 *prod, u32 *cons)
+{
+	struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_dbg_ring_info_get_input req = {0};
+	int rc;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
+	req.ring_type = ring_type;
+	req.fw_ring_id = cpu_to_le32(ring_id);
+	mutex_lock(&bp->hwrm_cmd_lock);
+	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	if (!rc) {
+		*prod = le32_to_cpu(resp->producer_index);
+		*cons = le32_to_cpu(resp->consumer_index);
+	}
+	mutex_unlock(&bp->hwrm_cmd_lock);
+	return rc;
+}
+
 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8841,11 @@ static void bnxt_timer(struct timer_list *t)
 			bnxt_queue_sp_work(bp);
 		}
 	}
+
+	if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
+		bnxt_queue_sp_work(bp);
+	}
 bnxt_restart_timer:
 	mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -8851,6 +8876,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
 	bnxt_rtnl_unlock_sp(bp);
 }
 
+static void bnxt_chk_missed_irq(struct bnxt *bp)
+{
+	int i;
+
+	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+		return;
+
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		struct bnxt_napi *bnapi = bp->bnapi[i];
+		struct bnxt_cp_ring_info *cpr;
+		u32 fw_ring_id;
+		int j;
+
+		if (!bnapi)
+			continue;
+
+		cpr = &bnapi->cp_ring;
+		for (j = 0; j < 2; j++) {
+			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+			u32 val[2];
+
+			if (!cpr2 || cpr2->has_more_work ||
+			    !bnxt_has_work(bp, cpr2))
+				continue;
+
+			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
+				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
+				continue;
+			}
+			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
+			bnxt_dbg_hwrm_ring_info_get(bp,
+				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
+				fw_ring_id, &val[0], &val[1]);
+			cpr->missed_irqs++;
+		}
+	}
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +8993,9 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
 		bnxt_tc_flow_stats_work(bp);
 
+	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
+		bnxt_chk_missed_irq(bp);
+
 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
 	 * must be the last functions to be called before exiting.
 	 */
@@ -10087,6 +10153,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}
 
 	bnxt_hwrm_func_qcfg(bp);
+	bnxt_hwrm_vnic_qcaps(bp);
 	bnxt_hwrm_port_led_qcaps(bp);
 	bnxt_ethtool_init(bp);
 	bnxt_dcb_init(bp);
@@ -10120,7 +10187,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			   VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
 	}
 
-	bnxt_hwrm_vnic_qcaps(bp);
 	if (bnxt_rfs_supported(bp)) {
 		dev->hw_features |= NETIF_F_NTUPLE;
 		if (bnxt_rfs_capable(bp)) {

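bnxt_chk_missed_irq() above applies a two-tick debounce: a ring only counts as having missed an interrupt if it has pending work and its consumer index did not move between two timer invocations. The core of that logic, reduced to a standalone sketch (foo_ types are placeholders for bnxt_cp_ring_info):

	#include <stdbool.h>

	struct foo_ring {
		unsigned int cons;	/* consumer index, advanced by NAPI */
		unsigned int last_cons;	/* snapshot from the previous tick */
		unsigned long missed_irqs;
	};

	/* placeholder for bnxt_has_work(): unprocessed completions? */
	static bool foo_has_work(const struct foo_ring *r)
	{
		return r->cons != 0;
	}

	static void foo_check_missed_irq(struct foo_ring *r)
	{
		if (!foo_has_work(r))
			return;

		if (r->cons != r->last_cons) {
			/* progress was made; just re-arm the check */
			r->last_cons = r->cons;
			return;
		}

		/* work pending, no progress for a full tick: likely missed */
		r->missed_irqs++;
	}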

@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
 	u8			had_work_done:1;
 	u8			has_more_work:1;
 
+	u32			last_cp_raw_cons;
+
 	struct bnxt_coal	rx_ring_coal;
 	u64			rx_packets;
 	u64			rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
 	dma_addr_t		hw_stats_map;
 	u32			hw_stats_ctx_id;
 	u64			rx_l4_csum_errors;
+	u64			missed_irqs;
 
 	struct bnxt_ring_struct	cp_ring_struct;
@@ -1527,6 +1530,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
 #define BNXT_FLOW_STATS_SP_EVENT	15
 #define BNXT_UPDATE_PHY_SP_EVENT	16
+#define BNXT_RING_COAL_NOW_SP_EVENT	17
 
 	struct bnxt_hw_resc	hw_resc;
 	struct bnxt_pf_info	pf;


@@ -137,7 +137,7 @@ reset_coalesce:
 	return rc;
 }
 
-#define BNXT_NUM_STATS	21
+#define BNXT_NUM_STATS	22
 
 #define BNXT_RX_STATS_ENTRY(counter)	\
 	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
 		for (k = 0; k < stat_fields; j++, k++)
 			buf[j] = le64_to_cpu(hw_stats[k]);
 		buf[j++] = cpr->rx_l4_csum_errors;
+		buf[j++] = cpr->missed_irqs;
 
 		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
 			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 			buf += ETH_GSTRING_LEN;
 			sprintf(buf, "[%d]: rx_l4_csum_errors", i);
 			buf += ETH_GSTRING_LEN;
+			sprintf(buf, "[%d]: missed_irqs", i);
+			buf += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
 			strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
 	record->asic_state = 0;
 	strlcpy(record->system_name, utsname()->nodename,
 		sizeof(record->system_name));
-	record->year = cpu_to_le16(tm.tm_year);
-	record->month = cpu_to_le16(tm.tm_mon);
+	record->year = cpu_to_le16(tm.tm_year + 1900);
+	record->month = cpu_to_le16(tm.tm_mon + 1);
 	record->day = cpu_to_le16(tm.tm_mday);
 	record->hour = cpu_to_le16(tm.tm_hour);
 	record->minute = cpu_to_le16(tm.tm_min);

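The coredump timestamp fix above is about struct tm's conventions: tm_year counts years since 1900 and tm_mon runs 0..11, so both need offsets before being stored as calendar values. The same conventions hold for userspace struct tm:

	#include <assert.h>
	#include <time.h>

	int main(void)
	{
		time_t t = 0;	/* 1970-01-01T00:00:00Z */
		struct tm tm;

		gmtime_r(&t, &tm);
		assert(tm.tm_year + 1900 == 1970);	/* years since 1900 */
		assert(tm.tm_mon + 1 == 1);		/* months are 0-based */
		return 0;
	}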

@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
 	if (ulp_id == BNXT_ROCE_ULP) {
 		unsigned int max_stat_ctxs;
 
+		if (bp->flags & BNXT_FLAG_CHIP_P5)
+			return -EOPNOTSUPP;
+
 		max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
 		if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
 		    bp->num_stat_ctxs == max_stat_ctxs)


@@ -67,7 +67,6 @@ config CHELSIO_T3
 config CHELSIO_T4
 	tristate "Chelsio Communications T4/T5/T6 Ethernet support"
 	depends on PCI && (IPV6 || IPV6=n)
-	depends on THERMAL || !THERMAL
 	select FW_LOADER
 	select MDIO
 	select ZLIB_DEFLATE
