Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:

drivers/net/can/dev.c
  commit 03f16c5075 ("can: dev: can_restart: fix use after free bug")
  commit 3e77f70e73 ("can: dev: move driver related infrastructure into separate subdir")

  Code move.

drivers/net/dsa/b53/b53_common.c
  commit 8e4052c32d ("net: dsa: b53: fix an off by one in checking "vlan->vid"")
  commit b7a9e0da2d ("net: switchdev: remove vid_begin -> vid_end range from VLAN objects")

  Field rename.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2021-01-20 12:16:11 -08:00
commit 0fe2f273ab
285 changed files with 3218 additions and 1452 deletions


@ -55,6 +55,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>


@ -5972,6 +5972,10 @@
This option is obsoleted by the "nopv" option, which
has equivalent effect for XEN platform.
xen_no_vector_callback
[KNL,X86,XEN] Disable the vector callback for Xen
event channel interrupts.
xen_scrub_pages= [XEN]
Boolean option to control scrubbing pages before giving them back
to Xen, for use by other domains. Can be also changed at runtime


@ -50,8 +50,8 @@ The following files belong to it:
0x00000010 Memory Uncorrectable non-fatal
0x00000020 Memory Uncorrectable fatal
0x00000040 PCI Express Correctable
0x00000080 PCI Express Uncorrectable fatal
0x00000100 PCI Express Uncorrectable non-fatal
0x00000080 PCI Express Uncorrectable non-fatal
0x00000100 PCI Express Uncorrectable fatal
0x00000200 Platform Correctable
0x00000400 Platform Uncorrectable non-fatal
0x00000800 Platform Uncorrectable fatal


@ -534,3 +534,6 @@ offload. Hence, TLS TX device feature flag requires TX csum offload being set.
Disabling the latter implies clearing the former. Disabling TX checksum offload
should not affect old connections, and drivers should make sure checksum
calculation does not break for them.
Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
does not want to enable RX csum offload, TLS RX device feature is disabled
as well.


@ -906,7 +906,7 @@ AMD KFD
M: Felix Kuehling <Felix.Kuehling@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git git://people.freedesktop.org/~agd5f/linux
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
@ -3334,7 +3334,7 @@ F: arch/riscv/net/
X: arch/riscv/net/bpf_jit_comp64.c
BPF JIT for RISC-V (64-bit)
M: Björn Töpel <bjorn.topel@gmail.com>
M: Björn Töpel <bjorn@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
@ -14827,7 +14827,7 @@ M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git git://people.freedesktop.org/~agd5f/linux
T: git https://gitlab.freedesktop.org/agd5f/linux.git
F: drivers/gpu/drm/amd/
F: drivers/gpu/drm/radeon/
F: include/uapi/drm/amdgpu_drm.h
@ -16328,6 +16328,7 @@ M: Pekka Enberg <penberg@kernel.org>
M: David Rientjes <rientjes@google.com>
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
M: Vlastimil Babka <vbabka@suse.cz>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/sl?b*.h
@ -19430,7 +19431,7 @@ F: drivers/net/ethernet/*/*/*xdp*
K: (?:\b|_)xdp(?:\b|_)
XDP SOCKETS (AF_XDP)
M: Björn Töpel <bjorn.topel@intel.com>
M: Björn Töpel <bjorn@kernel.org>
M: Magnus Karlsson <magnus.karlsson@intel.com>
R: Jonathan Lemon <jonathan.lemon@gmail.com>
L: netdev@vger.kernel.org


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 11
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*


@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
}
gnttab_init();
if (!xen_initial_domain())
xenbus_probe(NULL);
xenbus_probe();
/*
* Making sure board specific code will not set up ops for


@ -174,8 +174,6 @@ config ARM64
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API


@ -17,7 +17,7 @@
#include <asm/lse.h>
#define ATOMIC_OP(op) \
static inline void arch_##op(int i, atomic_t *v) \
static __always_inline void arch_##op(int i, atomic_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
#undef ATOMIC_OP
#define ATOMIC_FETCH_OP(name, op) \
static inline int arch_##op##name(int i, atomic_t *v) \
static __always_inline int arch_##op##name(int i, atomic_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
#undef ATOMIC_FETCH_OPS
#define ATOMIC64_OP(op) \
static inline void arch_##op(long i, atomic64_t *v) \
static __always_inline void arch_##op(long i, atomic64_t *v) \
{ \
__lse_ll_sc_body(op, i, v); \
}
@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
#undef ATOMIC64_OP
#define ATOMIC64_FETCH_OP(name, op) \
static inline long arch_##op##name(long i, atomic64_t *v) \
static __always_inline long arch_##op##name(long i, atomic64_t *v) \
{ \
return __lse_ll_sc_body(op##name, i, v); \
}
@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
return __lse_ll_sc_body(atomic64_dec_if_positive, v);
}


@ -94,8 +94,7 @@
#endif /* CONFIG_ARM64_FORCE_52BIT */
extern phys_addr_t arm64_dma_phys_limit;
extern phys_addr_t arm64_dma32_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT ((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT


@ -75,7 +75,7 @@ int main(void)
DEFINE(S_SDEI_TTBR1, offsetof(struct pt_regs, sdei_ttbr1));
DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
BLANK();
#ifdef CONFIG_COMPAT
DEFINE(COMPAT_SIGFRAME_REGS_OFFSET, offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));


@ -35,7 +35,7 @@
*/
.macro ftrace_regs_entry, allregs=0
/* Make room for pt_regs, plus a callee frame */
sub sp, sp, #(S_FRAME_SIZE + 16)
sub sp, sp, #(PT_REGS_SIZE + 16)
/* Save function arguments (and x9 for simplicity) */
stp x0, x1, [sp, #S_X0]
@ -61,15 +61,15 @@
.endif
/* Save the callsite's SP and LR */
add x10, sp, #(S_FRAME_SIZE + 16)
add x10, sp, #(PT_REGS_SIZE + 16)
stp x9, x10, [sp, #S_LR]
/* Save the PC after the ftrace callsite */
str x30, [sp, #S_PC]
/* Create a frame record for the callsite above pt_regs */
stp x29, x9, [sp, #S_FRAME_SIZE]
add x29, sp, #S_FRAME_SIZE
stp x29, x9, [sp, #PT_REGS_SIZE]
add x29, sp, #PT_REGS_SIZE
/* Create our frame record within pt_regs. */
stp x29, x30, [sp, #S_STACKFRAME]
@ -120,7 +120,7 @@ ftrace_common_return:
ldr x9, [sp, #S_PC]
/* Restore the callsite's SP */
add sp, sp, #S_FRAME_SIZE + 16
add sp, sp, #PT_REGS_SIZE + 16
ret x9
SYM_CODE_END(ftrace_common)
@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
ldr x0, [sp, #S_PC]
sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
add x1, sp, #S_LR // parent_ip (callsite's LR)
ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
bl prepare_ftrace_return
b ftrace_common_return
SYM_CODE_END(ftrace_graph_caller)


@ -75,7 +75,7 @@ alternative_else_nop_endif
.endif
#endif
sub sp, sp, #S_FRAME_SIZE
sub sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
/*
* Test whether the SP has overflowed, without corrupting a GPR.
@ -96,7 +96,7 @@ alternative_else_nop_endif
* userspace, and can clobber EL0 registers to free up GPRs.
*/
/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
msr tpidr_el0, x0
/* Recover the original x0 value and stash it in tpidrro_el0 */
@ -253,7 +253,7 @@ alternative_else_nop_endif
scs_load tsk, x20
.else
add x21, sp, #S_FRAME_SIZE
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
.endif /* \el == 0 */
mrs x22, elr_el1
@ -377,7 +377,7 @@ alternative_else_nop_endif
ldp x26, x27, [sp, #16 * 13]
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
add sp, sp, #PT_REGS_SIZE // restore sp
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@ -580,12 +580,12 @@ __bad_stack:
/*
* Store the original GPRs to the new stack. The orginal SP (minus
* S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
* PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
*/
sub sp, sp, #S_FRAME_SIZE
sub sp, sp, #PT_REGS_SIZE
kernel_entry 1
mrs x0, tpidr_el0
add x0, x0, #S_FRAME_SIZE
add x0, x0, #PT_REGS_SIZE
str x0, [sp, #S_SP]
/* Stash the regs for handle_bad_stack */


@ -23,8 +23,6 @@
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/cpufreq.h>
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
static int __init armv8_pmu_driver_init(void)
{
int ret;
if (acpi_disabled)
ret = platform_driver_register(&armv8_pmu_driver);
return platform_driver_register(&armv8_pmu_driver);
else
ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
/*
* Try to re-initialize lockup detector after PMU init in
* case PMU events are triggered via NMIs.
*/
if (ret == 0 && arm_pmu_irq_is_nmi())
lockup_detector_init();
return ret;
return arm_pmu_acpi_probe(armv8_pmuv3_init);
}
device_initcall(armv8_pmu_driver_init)
@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
/*
* Safe maximum CPU frequency in case a particular platform doesn't implement
* cpufreq driver. Although, architecture doesn't put any restrictions on
* maximum frequency but 5 GHz seems to be safe maximum given the available
* Arm CPUs in the market which are clocked much less than 5 GHz. On the other
* hand, we can't make it much higher as it would lead to a large hard-lockup
* detection timeout on parts which are running slower (eg. 1GHz on
* Developerbox) and doesn't possess a cpufreq driver.
*/
#define SAFE_MAX_CPU_FREQ 5000000000UL // 5 GHz
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
unsigned int cpu = smp_processor_id();
unsigned long max_cpu_freq;
max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
if (!max_cpu_freq)
max_cpu_freq = SAFE_MAX_CPU_FREQ;
return (u64)max_cpu_freq * watchdog_thresh;
}
#endif


@ -25,7 +25,7 @@
stp x24, x25, [sp, #S_X24]
stp x26, x27, [sp, #S_X26]
stp x28, x29, [sp, #S_X28]
add x0, sp, #S_FRAME_SIZE
add x0, sp, #PT_REGS_SIZE
stp lr, x0, [sp, #S_LR]
/*
* Construct a useful saved PSTATE
@ -62,7 +62,7 @@
.endm
SYM_CODE_START(kretprobe_trampoline)
sub sp, sp, #S_FRAME_SIZE
sub sp, sp, #PT_REGS_SIZE
save_all_base_regs
@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
restore_all_base_regs
add sp, sp, #S_FRAME_SIZE
add sp, sp, #PT_REGS_SIZE
ret
SYM_CODE_END(kretprobe_trampoline)


@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
/*
* The assembly code enters us with IRQs off, but it hasn't
* informed the tracing code of that for efficiency reasons.
* Update the trace code with the current status.
*/
trace_hardirqs_off();
do {
if (thread_flags & _TIF_NEED_RESCHED) {
/* Unmask Debug and SError for the next task */


@ -9,6 +9,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exception.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
/*
* We're off to userspace, where interrupts are
* always enabled after we restore the flags from
* the SPSR.
*/
trace_hardirqs_on();
if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
return;
}
local_daif_restore(DAIF_PROCCTX);
}


@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
/*
* We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
* memory as some devices, namely the Raspberry Pi 4, have peripherals with
* this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
* bit addressable memory area.
* If the corresponding config options are enabled, we create both ZONE_DMA
* and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
* unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
* In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
* otherwise it is empty.
*/
phys_addr_t arm64_dma_phys_limit __ro_after_init;
phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
if (crash_base == 0) {
/* Current arm64 boot protocol requires 2MB alignment */
crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
crash_size, SZ_2M);
if (crash_base == 0) {
pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
unsigned int __maybe_unused acpi_zone_dma_bits;
unsigned int __maybe_unused dt_zone_dma_bits;
phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
#ifdef CONFIG_ZONE_DMA
acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = dma32_phys_limit;
#endif
if (!arm64_dma_phys_limit)
arm64_dma_phys_limit = PHYS_MASK + 1;
max_zone_pfns[ZONE_NORMAL] = max;
free_area_init(max_zone_pfns);
@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
if (IS_ENABLED(CONFIG_ZONE_DMA32))
arm64_dma32_phys_limit = max_zone_phys(32);
else
arm64_dma32_phys_limit = PHYS_MASK + 1;
reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@ -438,6 +436,11 @@ void __init bootmem_init(void)
sparse_init();
zone_sizes_init(min, max);
/*
* Reserve the CMA area after arm64_dma_phys_limit was initialised.
*/
dma_contiguous_reserve(arm64_dma_phys_limit);
/*
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
@ -455,7 +458,7 @@ void __init bootmem_init(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;


@ -3,6 +3,7 @@
#define _ASM_IA64_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
#include <asm/page.h>
/*
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space


@ -13,6 +13,7 @@
#include <linux/libfdt.h>
#include <asm/addrspace.h>
#include <asm/unaligned.h>
/*
* These two variables specify the free mem region
@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
dtb_size = fdt_totalsize((void *)&__appended_dtb);
/* last four bytes is always image size in little endian */
image_size = le32_to_cpup((void *)&__image_end - 4);
image_size = get_unaligned_le32((void *)&__image_end - 4);
/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,


@ -1444,7 +1444,7 @@ static void octeon_irq_setup_secondary_ciu2(void)
static int __init octeon_irq_init_ciu(
struct device_node *ciu_node, struct device_node *parent)
{
unsigned int i, r;
int i, r;
struct irq_chip *chip;
struct irq_chip *chip_edge;
struct irq_chip *chip_mbox;


@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* Some data types as stored in coredump.
*/
#define user_long_t compat_long_t
#define user_siginfo_t compat_siginfo_t
#define copy_siginfo_to_external copy_siginfo_to_external32
#include "../../../fs/binfmt_elf.c"


@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32
/*
* Some data types as stored in coredump.
*/
#define user_long_t compat_long_t
#define user_siginfo_t compat_siginfo_t
#define copy_siginfo_to_external copy_siginfo_to_external32
#include "../../../fs/binfmt_elf.c"


@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
const void *area, size_t size)
{
size_t i;
unsigned long *ptr = (unsigned long *)area;
const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
size_t diff, i;
diff = (void *)ptr - area;
if (unlikely(size < diff + sizeof(hash)))
return hash;
size = ALIGN_DOWN(size - diff, sizeof(hash));
for (i = 0; i < size / sizeof(hash); i++) {
/* Rotate by odd number of bits and XOR. */


@ -103,6 +103,8 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz
return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}
#ifdef __powerpc64__
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
@ -115,10 +117,22 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}
#ifdef CONFIG_VDSO32
#else
#define BUILD_VDSO32 1
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
}
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
}
static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{


@ -187,6 +187,12 @@ SECTIONS
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
INIT_TEXT
/*
*.init.text might be RO so we must ensure this section ends on
* a page boundary.
*/
. = ALIGN(PAGE_SIZE);
_einittext = .;
#ifdef CONFIG_PPC64
*(.tramp.ftrace.init);
@ -200,6 +206,8 @@ SECTIONS
EXIT_TEXT
}
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(16)
. = ALIGN(8);


@ -137,7 +137,7 @@ config PA_BITS
config PAGE_OFFSET
hex
default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
default 0x80000000 if 64BIT && !MMU
default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@ -247,10 +247,12 @@ config MODULE_SECTIONS
choice
prompt "Maximum Physical Memory"
default MAXPHYSMEM_2GB if 32BIT
default MAXPHYSMEM_1GB if 32BIT
default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
config MAXPHYSMEM_1GB
bool "1GiB"
config MAXPHYSMEM_2GB
bool "2GiB"
config MAXPHYSMEM_128GB


@ -88,7 +88,9 @@
phy-mode = "gmii";
phy-handle = <&phy0>;
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0771";
reg = <0>;
reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
};
};


@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y


@ -99,7 +99,6 @@
| _PAGE_DIRTY)
#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \


@ -10,7 +10,7 @@
#include <linux/types.h>
#ifndef GENERIC_TIME_VSYSCALL
#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif


@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)
static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
/*
* Using raw_smp_processor_id() elides a preemptability check, but this
* is really indicative of a larger problem: the cacheinfo UABI assumes
* that cores have a homonogenous view of the cache hierarchy. That
* happens to be the case for the current set of RISC-V systems, but
* likely won't be true in general. Since there's no way to provide
* correct information for these systems via the current UABI we're
* just eliding the check for now.
*/
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
struct cacheinfo *this_leaf;
int index;


@ -124,15 +124,15 @@ skip_context_tracking:
REG_L a1, (a1)
jr a1
1:
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
/*
* Exceptions run with interrupts enabled or disabled depending on the
* state of SR_PIE in m/sstatus.
*/
andi t0, s1, SR_PIE
beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
csrs CSR_STATUS, SR_IE
1:
@ -155,6 +155,15 @@ skip_context_tracking:
tail do_trap_unknown
handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
/*
* When running is M-Mode (no MMU config), MPIE does not get set.
* As a result, we need to force enable interrupts here because
* handle_exception did not do set SR_IE as it always sees SR_PIE
* being cleared.
*/
csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
/* Recover a0 - a7 for system calls */
REG_L a0, PT_A0(sp)
@ -186,14 +195,7 @@ check_syscall_nr:
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
bge a7, t0, 1f
/*
* Check if syscall is rejected by tracer, i.e., a7 == -1.
* If yes, we pretend it was executed.
*/
li t1, -1
beq a7, t1, ret_from_syscall_rejected
blt a7, t1, 1f
bgeu a7, t0, 1f
/* Call syscall */
la s0, sys_call_table
slli t0, a7, RISCV_LGPTR


@ -127,7 +127,9 @@ static void __init init_resources(void)
{
struct memblock_region *region = NULL;
struct resource *res = NULL;
int ret = 0;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
int ret = 0, i = 0;
code_res.start = __pa_symbol(_text);
code_res.end = __pa_symbol(_etext) - 1;
@ -145,16 +147,17 @@ static void __init init_resources(void)
bss_res.end = __pa_symbol(__bss_stop) - 1;
bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
if (!mem_res)
panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
/*
* Start by adding the reserved regions, if they overlap
* with /memory regions, insert_resource later on will take
* care of it.
*/
for_each_reserved_mem_region(region) {
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
if (!res)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(struct resource));
res = &mem_res[i++];
res->name = "Reserved";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
@ -171,8 +174,10 @@ static void __init init_resources(void)
* Ignore any other reserved regions within
* system memory.
*/
if (memblock_is_memory(res->start))
if (memblock_is_memory(res->start)) {
memblock_free((phys_addr_t) res, sizeof(struct resource));
continue;
}
ret = add_resource(&iomem_resource, res);
if (ret < 0)
@ -181,10 +186,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
if (!res)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(struct resource));
res = &mem_res[i++];
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@ -205,9 +207,9 @@ static void __init init_resources(void)
return;
error:
memblock_free((phys_addr_t) res, sizeof(struct resource));
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
memblock_free((phys_addr_t) mem_res, mem_res_sz);
}


@ -14,7 +14,7 @@
#include <asm/stacktrace.h>
register unsigned long sp_in_global __asm__("sp");
register const unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
@ -28,9 +28,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp = sp_in_global;
fp = (unsigned long)__builtin_frame_address(0);
sp = current_sp;
sp = sp_in_global;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */


@ -4,6 +4,7 @@
* Copyright (C) 2017 SiFive
*/
#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>
@ -24,6 +25,8 @@ void __init time_init(void)
riscv_timebase = prop;
lpj_fine = riscv_timebase / HZ;
of_clk_init(NULL);
timer_probe();
}


@ -12,7 +12,7 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#ifdef GENERIC_TIME_VSYSCALL
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>


@ -157,9 +157,10 @@ disable:
void __init setup_bootmem(void)
{
phys_addr_t mem_start = 0;
phys_addr_t start, end = 0;
phys_addr_t start, dram_end, end = 0;
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
phys_addr_t max_mapped_addr = __pa(~(ulong)0);
u64 i;
/* Find the memory region containing the kernel */
@ -181,7 +182,18 @@ void __init setup_bootmem(void)
/* Reserve from the start of the kernel to the end of the kernel */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
max_pfn = PFN_DOWN(memblock_end_of_DRAM());
dram_end = memblock_end_of_DRAM();
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
* if end of dram is equal to maximum addressable memory.
*/
if (max_mapped_addr == (dram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
max_pfn = PFN_DOWN(dram_end);
max_low_pfn = max_pfn;
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn);


@ -93,8 +93,8 @@ void __init kasan_init(void)
VMALLOC_END));
for_each_mem_range(i, &_start, &_end) {
void *start = (void *)_start;
void *end = (void *)_end;
void *start = (void *)__va(_start);
void *end = (void *)__va(_end);
if (start >= end)
break;


@ -315,6 +315,25 @@ static struct syscore_ops hv_syscore_ops = {
.resume = hv_resume,
};
static void (* __initdata old_setup_percpu_clockev)(void);
static void __init hv_stimer_setup_percpu_clockev(void)
{
/*
* Ignore any errors in setting up stimer clockevents
* as we can run with the LAPIC timer as a fallback.
*/
(void)hv_stimer_alloc();
/*
* Still register the LAPIC timer, because the direct-mode STIMER is
* not supported by old versions of Hyper-V. This also allows users
* to switch to LAPIC timer via /sys, if they want to.
*/
if (old_setup_percpu_clockev)
old_setup_percpu_clockev();
}
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@ -393,10 +412,14 @@ void __init hyperv_init(void)
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
/*
* Ignore any errors in setting up stimer clockevents
* as we can run with the LAPIC timer as a fallback.
* hyperv_init() is called before LAPIC is initialized: see
* apic_intr_mode_init() -> x86_platform.apic_post_init() and
* apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
* depends on LAPIC, so hv_stimer_alloc() should be called from
* x86_init.timers.setup_percpu_clockev.
*/
(void)hv_stimer_alloc();
old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
hv_apic_init();


@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
else
per_cpu(xen_vcpu_id, cpu) = cpu;
rc = xen_vcpu_setup(cpu);
if (rc)
if (rc || !xen_have_vector_callback)
return rc;
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
if (xen_feature(XENFEAT_hvm_safe_pvclock))
xen_setup_timer(cpu);
rc = xen_smp_intr_init(cpu);
@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
return 0;
}
static bool no_vector_callback __initdata;
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)
xen_panic_handler_init();
if (xen_feature(XENFEAT_hvm_callback_vector))
if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
xen_hvm_smp_init();
@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);
static __init int xen_parse_no_vector_callback(char *arg)
{
no_vector_callback = true;
return 0;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
bool __init xen_hvm_need_lapic(void)
{
if (xen_pv_domain())


@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
int cpu;
native_smp_prepare_cpus(max_cpus);
WARN_ON(xen_smp_intr_init(0));
xen_init_lock_cpu(0);
if (xen_have_vector_callback) {
WARN_ON(xen_smp_intr_init(0));
xen_init_lock_cpu(0);
}
for_each_possible_cpu(cpu) {
if (cpu == 0)
@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static void xen_hvm_cpu_die(unsigned int cpu)
{
if (common_cpu_die(cpu) == 0) {
xen_smp_intr_free(cpu);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
if (xen_have_vector_callback) {
xen_smp_intr_free(cpu);
xen_uninit_lock_cpu(cpu);
xen_teardown_timer(cpu);
}
}
}
#else
@ -64,14 +68,19 @@ static void xen_hvm_cpu_die(unsigned int cpu)
void __init xen_hvm_smp_init(void)
{
if (!xen_have_vector_callback)
return;
smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.smp_cpus_done = xen_smp_cpus_done;
smp_ops.cpu_die = xen_hvm_cpu_die;
if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
nopvspin = true;
#endif
return;
}
smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
smp_ops.smp_cpus_done = xen_smp_cpus_done;
}


@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_key;
if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
sig->data_size) {
ret = cert_sig_digest_update(sig, tfm);
if (ret)
goto error_free_key;


@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
if (!min)
min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;


@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
extern struct list_head acpi_bus_id_list;
struct acpi_device_bus_id {
char bus_id[15];
const char *bus_id;
unsigned int instance_no;
struct list_head node;
};


@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
acpi_device_bus_id->instance_no--;
else {
list_del(&acpi_device_bus_id->node);
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
}
break;
@ -674,7 +675,14 @@ int acpi_device_add(struct acpi_device *device,
}
if (!found) {
acpi_device_bus_id = new_bus_id;
strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
acpi_device_bus_id->bus_id =
kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
if (!acpi_device_bus_id->bus_id) {
pr_err(PREFIX "Memory allocation error for bus id\n");
result = -ENOMEM;
goto err_free_new_bus_id;
}
acpi_device_bus_id->instance_no = 0;
list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
}
@ -709,6 +717,11 @@ int acpi_device_add(struct acpi_device *device,
if (device->parent)
list_del(&device->node);
list_del(&device->wakeup_list);
err_free_new_bus_id:
if (!found)
kfree(new_bus_id);
mutex_unlock(&acpi_device_lock);
err_detach:


@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
config CRYPTO_DEV_OMAP_SHAM
tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
depends on ARCH_OMAP2PLUS
select CRYPTO_ENGINE
select CRYPTO_SHA1
select CRYPTO_MD5
select CRYPTO_SHA256


@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
buffer->vaddr = NULL;
}
/* free page list */
kfree(buffer->pages);
/* release memory */
cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}


@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
union igp_info {
struct atom_integrated_system_info_v1_11 v11;
struct atom_integrated_system_info_v1_12 v12;
struct atom_integrated_system_info_v2_1 v21;
};
union umc_info {
@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (adev->flags & AMD_IS_APU) {
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
mem_channel_number = igp_info->v11.umachannelnumber;
/* channel width is 64 */
if (vram_width)
*vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
switch (frev) {
case 1:
switch (crev) {
case 11:
case 12:
mem_channel_number = igp_info->v11.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
/* channel width is 64 */
if (vram_width)
*vram_width = mem_channel_number * 64;
mem_type = igp_info->v11.memorytype;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
default:
return -EINVAL;
}
break;
case 12:
mem_channel_number = igp_info->v12.umachannelnumber;
/* channel width is 64 */
if (vram_width)
*vram_width = mem_channel_number * 64;
mem_type = igp_info->v12.memorytype;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
case 2:
switch (crev) {
case 1:
case 2:
mem_channel_number = igp_info->v21.umachannelnumber;
if (!mem_channel_number)
mem_channel_number = 1;
/* channel width is 64 */
if (vram_width)
*vram_width = mem_channel_number * 64;
mem_type = igp_info->v21.memorytype;
if (vram_type)
*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
break;
default:
return -EINVAL;
}
break;
default:
return -EINVAL;


@ -3034,7 +3034,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
#endif
default:
if (amdgpu_dc > 0)
DRM_INFO("Display Core has been requested via kernel parameter "
DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
}


@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
/* Navi12 */
{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},


@ -99,6 +99,10 @@
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580
#define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441
#define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1
#define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261
@ -160,6 +164,9 @@
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db
#define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030
#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0
MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@ -3324,6 +3331,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@ -7192,6 +7200,9 @@ static int gfx_v10_0_hw_init(void *handle)
if (adev->asic_type == CHIP_SIENNA_CICHLID)
gfx_v10_3_program_pbb_mode(adev);
if (adev->asic_type >= CHIP_SIENNA_CICHLID)
gfx_v10_3_set_power_brake_sequence(adev);
return r;
}
@ -7377,8 +7388,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
switch (adev->asic_type) {
case CHIP_VANGOGH:
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
break;
default:
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
break;
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
@ -9169,6 +9188,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
}
}
static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
{
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
(0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
(0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
(0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
(0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
(0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
(0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
(0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
(0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
(0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
(0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
}
const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_GFX,


@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */


@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
if (adev->pdev->device == 0x1636)
if ((adev->pdev->device == 0x1636) ||
(adev->pdev->device == 0x164c))
adev->apu_flags |= AMD_APU_IS_RENOIR;
else
adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;


@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
(struct crat_subtype_iolink *)sub_type_hdr);
if (ret < 0)
return ret;
crat_table->length += (sub_type_hdr->length * entries);
crat_table->total_entries += entries;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
if (entries) {
crat_table->length += (sub_type_hdr->length * entries);
crat_table->total_entries += entries;
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length * entries);
}
#else
pr_info("IO link not available for non x86 platforms\n");
#endif


@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
}
#endif
#ifdef CONFIG_DEBUG_FS
static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
{
dm->crc_win_x_start_property =
drm_property_create_range(adev_to_drm(dm->adev),
DRM_MODE_PROP_ATOMIC,
"AMD_CRC_WIN_X_START", 0, U16_MAX);
if (!dm->crc_win_x_start_property)
return -ENOMEM;
dm->crc_win_y_start_property =
drm_property_create_range(adev_to_drm(dm->adev),
DRM_MODE_PROP_ATOMIC,
"AMD_CRC_WIN_Y_START", 0, U16_MAX);
if (!dm->crc_win_y_start_property)
return -ENOMEM;
dm->crc_win_x_end_property =
drm_property_create_range(adev_to_drm(dm->adev),
DRM_MODE_PROP_ATOMIC,
"AMD_CRC_WIN_X_END", 0, U16_MAX);
if (!dm->crc_win_x_end_property)
return -ENOMEM;
dm->crc_win_y_end_property =
drm_property_create_range(adev_to_drm(dm->adev),
DRM_MODE_PROP_ATOMIC,
"AMD_CRC_WIN_Y_END", 0, U16_MAX);
if (!dm->crc_win_y_end_property)
return -ENOMEM;
return 0;
}
#endif
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@ -1120,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_init_callbacks(adev->dm.dc, &init_params);
}
#endif
#ifdef CONFIG_DEBUG_FS
if (create_crtc_crc_properties(&adev->dm))
DRM_ERROR("amdgpu: failed to create crc property.\n");
#endif
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
@ -5333,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->crc_src = cur->crc_src;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
#ifdef CONFIG_DEBUG_FS
state->crc_window = cur->crc_window;
#endif
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_crtc_state *dm_new_state =
to_dm_crtc_state(crtc_state);
if (property == adev->dm.crc_win_x_start_property)
dm_new_state->crc_window.x_start = val;
else if (property == adev->dm.crc_win_y_start_property)
dm_new_state->crc_window.y_start = val;
else if (property == adev->dm.crc_win_x_end_property)
dm_new_state->crc_window.x_end = val;
else if (property == adev->dm.crc_win_y_end_property)
dm_new_state->crc_window.y_end = val;
else
return -EINVAL;
return 0;
}
static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val)
{
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_crtc_state *dm_state =
to_dm_crtc_state(state);
if (property == adev->dm.crc_win_x_start_property)
*val = dm_state->crc_window.x_start;
else if (property == adev->dm.crc_win_y_start_property)
*val = dm_state->crc_window.y_start;
else if (property == adev->dm.crc_win_x_end_property)
*val = dm_state->crc_window.x_end;
else if (property == adev->dm.crc_win_y_end_property)
*val = dm_state->crc_window.y_end;
else
return -EINVAL;
return 0;
}
#endif
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@ -5457,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#ifdef CONFIG_DEBUG_FS
.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
#endif
};
static enum drm_connector_status
@ -6662,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
#ifdef CONFIG_DEBUG_FS
static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
struct amdgpu_crtc *acrtc)
{
drm_object_attach_property(&acrtc->base.base,
dm->crc_win_x_start_property,
0);
drm_object_attach_property(&acrtc->base.base,
dm->crc_win_y_start_property,
0);
drm_object_attach_property(&acrtc->base.base,
dm->crc_win_x_end_property,
0);
drm_object_attach_property(&acrtc->base.base,
dm->crc_win_y_end_property,
0);
}
#endif
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t crtc_index)
@ -6728,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
#ifdef CONFIG_DEBUG_FS
attach_crtc_crc_properties(dm, acrtc);
#endif
return 0;
fail:
@ -8367,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
bool configure_crc = false;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@ -8377,27 +8260,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_retain(dm_new_crtc_state->stream);
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
}
if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
#ifdef CONFIG_DEBUG_FS
/**
* Frontend may have changed so reapply the CRC capture
* settings for the stream.
*/
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
configure_crc = true;
} else {
if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
configure_crc = true;
}
if (configure_crc)
if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
crtc, dm_new_crtc_state,
dm_new_crtc_state->crc_src);
}
#endif
}
}


@ -336,32 +336,6 @@ struct amdgpu_display_manager {
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
#ifdef CONFIG_DEBUG_FS
/**
* @crc_win_x_start_property:
*
* X start of the crc calculation window
*/
struct drm_property *crc_win_x_start_property;
/**
* @crc_win_y_start_property:
*
* Y start of the crc calculation window
*/
struct drm_property *crc_win_y_start_property;
/**
* @crc_win_x_end_property:
*
* X end of the crc calculation window
*/
struct drm_property *crc_win_x_end_property;
/**
* @crc_win_y_end_property:
*
* Y end of the crc calculation window
*/
struct drm_property *crc_win_y_end_property;
#endif
/**
* @mst_encoders:
*
@ -448,15 +422,6 @@ struct dm_plane_state {
struct dc_plane_state *dc_state;
};
#ifdef CONFIG_DEBUG_FS
struct crc_rec {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
};
#endif
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
@ -479,9 +444,6 @@ struct dm_crtc_state {
struct dc_info_packet vrr_infopacket;
int abm_level;
#ifdef CONFIG_DEBUG_FS
struct crc_rec crc_window;
#endif
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)


@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
{
dm_crtc_state->crc_window.x_start = 0;
dm_crtc_state->crc_window.y_start = 0;
dm_crtc_state->crc_window.x_end = 0;
dm_crtc_state->crc_window.y_end = 0;
}
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
{
bool ret = true;
if ((dm_crtc_state->crc_window.x_start != 0) ||
(dm_crtc_state->crc_window.y_start != 0) ||
(dm_crtc_state->crc_window.x_end != 0) ||
(dm_crtc_state->crc_window.y_end != 0))
ret = false;
return ret;
}
bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state)
{
bool ret = false;
if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
(dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
(dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
(dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
ret = true;
return ret;
}
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
int ret = 0;
struct crc_params *crc_window = NULL, tmp_window;
/* Configuration will be deferred to stream enable. */
if (!stream_state)
@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
/* Enable CRTC CRC generation if necessary. */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
if (!enable)
amdgpu_dm_set_crc_window_default(dm_crtc_state);
if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
crc_window = &tmp_window;
tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
}
if (!dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, crc_window, enable, enable)) {
stream_state, NULL, enable, enable)) {
ret = -EINVAL;
goto unlock;
}


@ -46,13 +46,10 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
}
/* amdgpu_dm_crc.c */
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state);
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name,


@ -3992,7 +3992,7 @@ bool dc_link_dp_set_test_pattern(
unsigned int cust_pattern_size)
{
struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
struct pipe_ctx *pipe_ctx = &pipes[0];
struct pipe_ctx *pipe_ctx = NULL;
unsigned int lane;
unsigned int i;
unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@ -4002,12 +4002,18 @@ bool dc_link_dp_set_test_pattern(
memset(&training_pattern, 0, sizeof(training_pattern));
for (i = 0; i < MAX_PIPES; i++) {
if (pipes[i].stream == NULL)
continue;
if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
pipe_ctx = &pipes[i];
break;
}
}
if (pipe_ctx == NULL)
return false;
/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
if (link->test_pattern_enabled && test_pattern ==
DP_TEST_PATTERN_VIDEO_MODE) {


@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
{
struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
uint32_t val = 0;
uint32_t val = 0xf;
if (opp_id < MAX_OPP && REG(MUX[opp_id]))
REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);


@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
.disable_stereo_support = true,


@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
.set_mcif_arb_params = dcn30_set_mcif_arb_params,


@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.DRAMClockChangeWatermark >
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
if (mode_lib->vba.DRAMClockChangeWatermark >
dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;


@ -1163,7 +1163,14 @@ retry:
if (ret)
goto out;
if (old_fb->format != fb->format) {
/*
* Only check the FOURCC format code, excluding modifiers. This is
* enough for all legacy drivers. Atomic drivers have their own
* checks in their ->atomic_check implementation, which will
* return -EINVAL if any hw or driver constraint is violated due
* to modifier changes.
*/
if (old_fb->format->format != fb->format->format) {
DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
ret = -EINVAL;
goto out;


@ -38,6 +38,7 @@ i915-y += i915_drv.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
i915_mitigations.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \


@ -1616,10 +1616,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder,
get_dsi_io_power_domains(i915,
enc_to_intel_dsi(encoder));
if (crtc_state->dsc.compression_enable)
intel_display_power_get(i915,
intel_dsc_power_domain(crtc_state));
}
static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,


@ -1650,16 +1650,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
val = pch_get_backlight(connector);
else
val = lpt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
if (cpu_mode) {
drm_dbg_kms(&dev_priv->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
lpt_set_backlight(connector->base.state, val);
intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
@ -1667,6 +1664,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
return 0;
}


@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_prepare(encoder, pipe_config);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
/* Deassert reset */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
/*
* Give the panel time to power-on and then deassert its reset.
* Depending on the VBT MIPI sequences version the deassert-seq
* may contain the necessary delay, intel_dsi_msleep() will skip
* the delay in that case. If there is no deassert-seq, then an
* unconditional msleep is used to give the panel time to power-on.
*/
if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
} else {
msleep(intel_dsi->panel_on_delay);
}
if (IS_GEMINILAKE(dev_priv)) {
glk_cold_boot = glk_dsi_enable_io(encoder);


@ -7,8 +7,6 @@
#include "i915_drv.h"
#include "intel_gpu_commands.h"
#define MAX_URB_ENTRIES 64
#define STATE_SIZE (4 * 1024)
#define GT3_INLINE_DATA_DELAYS 0x1E00
#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
@ -34,38 +32,59 @@ struct batch_chunk {
};
struct batch_vals {
u32 max_primitives;
u32 max_urb_entries;
u32 cmd_size;
u32 state_size;
u32 max_threads;
u32 state_start;
u32 batch_size;
u32 surface_start;
u32 surface_height;
u32 surface_width;
u32 scratch_size;
u32 max_size;
u32 size;
};
static inline int num_primitives(const struct batch_vals *bv)
{
/*
* We need to saturate the GPU with work in order to dispatch
* a shader on every HW thread, and clear the thread-local registers.
* In short, we have to dispatch work faster than the shaders can
* run in order to fill the EU and occupy each HW thread.
*/
return bv->max_threads;
}
static void
batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
{
if (IS_HASWELL(i915)) {
bv->max_primitives = 280;
bv->max_urb_entries = MAX_URB_ENTRIES;
switch (INTEL_INFO(i915)->gt) {
default:
case 1:
bv->max_threads = 70;
break;
case 2:
bv->max_threads = 140;
break;
case 3:
bv->max_threads = 280;
break;
}
bv->surface_height = 16 * 16;
bv->surface_width = 32 * 2 * 16;
} else {
bv->max_primitives = 128;
bv->max_urb_entries = MAX_URB_ENTRIES / 2;
switch (INTEL_INFO(i915)->gt) {
default:
case 1: /* including vlv */
bv->max_threads = 36;
break;
case 2:
bv->max_threads = 128;
break;
}
bv->surface_height = 16 * 8;
bv->surface_width = 32 * 16;
}
bv->cmd_size = bv->max_primitives * 4096;
bv->state_size = STATE_SIZE;
bv->state_start = bv->cmd_size;
bv->batch_size = bv->cmd_size + bv->state_size;
bv->scratch_size = bv->surface_height * bv->surface_width;
bv->max_size = bv->batch_size + bv->scratch_size;
bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K);
bv->surface_start = bv->state_start + SZ_4K;
bv->size = bv->surface_start + bv->surface_height * bv->surface_width;
}
static void batch_init(struct batch_chunk *bc,
@ -155,7 +174,8 @@ static u32
gen7_fill_binding_table(struct batch_chunk *state,
const struct batch_vals *bv)
{
u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
u32 surface_start =
gen7_fill_surface_state(state, bv->surface_start, bv);
u32 *cs = batch_alloc_items(state, 32, 8);
u32 offset = batch_offset(state, cs);
@ -214,9 +234,9 @@ static void
gen7_emit_state_base_address(struct batch_chunk *batch,
u32 surface_state_base)
{
u32 *cs = batch_alloc_items(batch, 0, 12);
u32 *cs = batch_alloc_items(batch, 0, 10);
*cs++ = STATE_BASE_ADDRESS | (12 - 2);
*cs++ = STATE_BASE_ADDRESS | (10 - 2);
/* general */
*cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
/* surface */
@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch,
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = BASE_ADDRESS_MODIFY;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
u32 urb_size, u32 curbe_size,
u32 mode)
{
u32 urb_entries = bv->max_urb_entries;
u32 threads = bv->max_primitives - 1;
u32 threads = bv->max_threads - 1;
u32 *cs = batch_alloc_items(batch, 32, 8);
*cs++ = MEDIA_VFE_STATE | (8 - 2);
@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch,
*cs++ = 0;
/* number of threads & urb entries for GPGPU vs Media Mode */
*cs++ = threads << 16 | urb_entries << 8 | mode << 2;
*cs++ = threads << 16 | 1 << 8 | mode << 2;
*cs++ = 0;
@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch,
{
unsigned int x_offset = (media_object_index % 16) * 64;
unsigned int y_offset = (media_object_index / 16) * 16;
unsigned int inline_data_size;
unsigned int media_batch_size;
unsigned int i;
unsigned int pkt = 6 + 3;
u32 *cs;
inline_data_size = 112 * 8;
media_batch_size = inline_data_size + 6;
cs = batch_alloc_items(batch, 8, pkt);
cs = batch_alloc_items(batch, 8, media_batch_size);
*cs++ = MEDIA_OBJECT | (media_batch_size - 2);
*cs++ = MEDIA_OBJECT | (pkt - 2);
/* interface descriptor offset */
*cs++ = 0;
@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch,
*cs++ = 0;
/* inline */
*cs++ = (y_offset << 16) | (x_offset);
*cs++ = y_offset << 16 | x_offset;
*cs++ = 0;
*cs++ = GT3_INLINE_DATA_DELAYS;
for (i = 3; i < inline_data_size; i++)
*cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 5);
u32 *cs = batch_alloc_items(batch, 0, 4);
*cs++ = GFX_OP_PIPE_CONTROL(5);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
PIPE_CONTROL_DEPTH_CACHE_FLUSH |
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch)
{
u32 *cs = batch_alloc_items(batch, 0, 8);
/* ivb: Stall before STATE_CACHE_INVALIDATE */
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD |
PIPE_CONTROL_CS_STALL;
*cs++ = 0;
*cs++ = 0;
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE;
*cs++ = 0;
*cs++ = 0;
batch_advance(batch, cs);
}
@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma,
const struct batch_vals *bv)
{
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int desc_count = 64;
const u32 urb_size = 112;
const unsigned int desc_count = 1;
const unsigned int urb_size = 1;
struct batch_chunk cmds, state;
u32 interface_descriptor;
u32 descriptors;
unsigned int i;
batch_init(&cmds, vma, start, 0, bv->cmd_size);
batch_init(&state, vma, start, bv->state_start, bv->state_size);
batch_init(&cmds, vma, start, 0, bv->state_start);
batch_init(&state, vma, start, bv->state_start, SZ_4K);
interface_descriptor =
gen7_fill_interface_descriptor(&state, bv,
IS_HASWELL(i915) ?
&cb_kernel_hsw :
&cb_kernel_ivb,
desc_count);
gen7_emit_pipeline_flush(&cmds);
descriptors = gen7_fill_interface_descriptor(&state, bv,
IS_HASWELL(i915) ?
&cb_kernel_hsw :
&cb_kernel_ivb,
desc_count);
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
batch_add(&cmds, MI_NOOP);
gen7_emit_state_base_address(&cmds, interface_descriptor);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_pipeline_flush(&cmds);
gen7_emit_state_base_address(&cmds, descriptors);
gen7_emit_pipeline_invalidate(&cmds);
gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
gen7_emit_interface_descriptor_load(&cmds,
interface_descriptor,
desc_count);
for (i = 0; i < bv->max_primitives; i++)
for (i = 0; i < num_primitives(bv); i++)
gen7_emit_media_object(&cmds, i);
batch_add(&cmds, MI_BATCH_BUFFER_END);
@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
batch_get_defaults(engine->i915, &bv);
if (!vma)
return bv.max_size;
return bv.size;
GEM_BUG_ON(vma->obj->base.size < bv.max_size);
GEM_BUG_ON(vma->obj->base.size < bv.size);
batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(batch))
return PTR_ERR(batch);
emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
emit_batch(vma, memset(batch, 0, bv.size), &bv);
i915_gem_object_flush_map(vma->obj);
__i915_gem_object_release_map(vma->obj);


@ -32,6 +32,7 @@
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_gt.h"
@ -886,7 +887,8 @@ static int switch_context(struct i915_request *rq)
GEM_BUG_ON(HAS_EXECLISTS(engine->i915));
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
if (engine->wa_ctx.vma->private != ce) {
if (engine->wa_ctx.vma->private != ce &&
i915_mitigate_clear_residuals()) {
ret = clear_residuals(rq);
if (ret)
return ret;
@ -1290,7 +1292,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
err = gen7_ctx_switch_bb_init(engine);
if (err)
goto err_ring_unpin;


@ -217,6 +217,15 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
/* No hpd_invert set in vgpu vbt, need to clear invert mask */
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
@ -273,6 +282,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTA_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
@ -301,6 +312,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTB_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
@ -329,6 +342,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTC_HOTPLUG_ENABLE;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@ -661,44 +676,62 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
if (connected) {
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIC_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
} else {
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
} else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~PORTA_HOTPLUG_STATUS_MASK;
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTA_HOTPLUG_LONG_DETECT;
intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIB_DETECTED;
} else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIC_DETECTED;
~SFUSE_STRAP_DDIB_DETECTED;
}
vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~PORTB_HOTPLUG_STATUS_MASK;
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTB_HOTPLUG_LONG_DETECT;
intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
if (connected) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIC_DETECTED;
} else {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIC_DETECTED;
}
vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
~PORTC_HOTPLUG_STATUS_MASK;
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTC_HOTPLUG_LONG_DETECT;
intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
}
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTB_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}


@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
if (IS_BROADWELL(dev_priv))
if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
/* FixMe: Re-enable APL/BXT once vfio_edid enabled */
else if (!IS_BROXTON(dev_priv))
else
ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;


@ -1047,6 +1047,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
void i915_driver_shutdown(struct drm_i915_private *i915)
{
disable_rpm_wakeref_asserts(&i915->runtime_pm);
i915_gem_suspend(i915);
drm_kms_helper_poll_disable(&i915->drm);
@ -1060,6 +1062,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_suspend_encoders(i915);
intel_shutdown_encoders(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)


@ -0,0 +1,146 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "i915_drv.h"
#include "i915_mitigations.h"
static unsigned long mitigations __read_mostly = ~0UL;
enum {
CLEAR_RESIDUALS = 0,
};
static const char * const names[] = {
[CLEAR_RESIDUALS] = "residuals",
};
bool i915_mitigate_clear_residuals(void)
{
return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS);
}
static int mitigations_set(const char *val, const struct kernel_param *kp)
{
unsigned long new = ~0UL;
char *str, *sep, *tok;
bool first = true;
int err = 0;
BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations));
str = kstrdup(val, GFP_KERNEL);
if (!str)
return -ENOMEM;
for (sep = str; (tok = strsep(&sep, ","));) {
bool enable = true;
int i;
/* Be tolerant of leading/trailing whitespace */
tok = strim(tok);
if (first) {
first = false;
if (!strcmp(tok, "auto"))
continue;
new = 0;
if (!strcmp(tok, "off"))
continue;
}
if (*tok == '!') {
enable = !enable;
tok++;
}
if (!strncmp(tok, "no", 2)) {
enable = !enable;
tok += 2;
}
if (*tok == '\0')
continue;
for (i = 0; i < ARRAY_SIZE(names); i++) {
if (!strcmp(tok, names[i])) {
if (enable)
new |= BIT(i);
else
new &= ~BIT(i);
break;
}
}
if (i == ARRAY_SIZE(names)) {
pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n",
DRIVER_NAME, val, tok);
err = -EINVAL;
break;
}
}
kfree(str);
if (err)
return err;
WRITE_ONCE(mitigations, new);
return 0;
}
static int mitigations_get(char *buffer, const struct kernel_param *kp)
{
unsigned long local = READ_ONCE(mitigations);
int count, i;
bool enable;
if (!local)
return scnprintf(buffer, PAGE_SIZE, "%s\n", "off");
if (local & BIT(BITS_PER_LONG - 1)) {
count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto");
enable = false;
} else {
enable = true;
count = 0;
}
for (i = 0; i < ARRAY_SIZE(names); i++) {
if ((local & BIT(i)) != enable)
continue;
count += scnprintf(buffer + count, PAGE_SIZE - count,
"%s%s,", enable ? "" : "!", names[i]);
}
buffer[count - 1] = '\n';
return count;
}
static const struct kernel_param_ops ops = {
.set = mitigations_set,
.get = mitigations_get,
};
module_param_cb_unsafe(mitigations, &ops, NULL, 0600);
MODULE_PARM_DESC(mitigations,
"Selectively enable security mitigations for all Intel® GPUs in the system.\n"
"\n"
" auto -- enables all mitigations required for the platform [default]\n"
" off -- disables all mitigations\n"
"\n"
"Individual mitigations can be enabled by passing a comma-separated string,\n"
"e.g. mitigations=residuals to enable only clearing residuals or\n"
"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n"
"Either '!' or 'no' may be used to switch from enabling the mitigation to\n"
"disabling it.\n"
"\n"
"Active mitigations for Ivybridge, Baytrail, Haswell:\n"
" residuals -- clear all thread-local registers between contexts"
);


@ -0,0 +1,13 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#ifndef __I915_MITIGATIONS_H__
#define __I915_MITIGATIONS_H__
#include <linux/types.h>
bool i915_mitigate_clear_residuals(void);
#endif /* __I915_MITIGATIONS_H__ */


@ -37,6 +37,7 @@ nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/wndwc67e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o


@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
{ GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },


@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
{ GA102_DISP_CURSOR, 0, cursc37a_new },
{ TU102_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },


@ -222,7 +222,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
@ -271,7 +271,7 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
if (ret)
return ret;
if (!syncbuf)
if (syncbuf < 0)
return 0;
ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,


@ -95,7 +95,7 @@ struct nv50_outp_atom {
int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
u64 syncbuf, struct nv50_dmac *dmac);
s64 syncbuf, struct nv50_dmac *dmac);
void nv50_dmac_destroy(struct nv50_dmac *);
/*


@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
{ GA102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ TU102_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}


@ -76,7 +76,7 @@ wimmc37b_init_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
int ret;
ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
&oclass, 0, &args, sizeof(args), 0,
&oclass, 0, &args, sizeof(args), -1,
&wndw->wimm);
if (ret) {
NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);


@ -784,6 +784,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
{ GA102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc67e_new },
{ TU102_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}


@ -129,6 +129,14 @@ int wndwc37e_update(struct nv50_wndw *, u32 *);
int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
bool wndwc57e_ilut(struct nv50_wndw *, struct nv50_wndw_atom *, int);
int wndwc57e_ilut_set(struct nv50_wndw *, struct nv50_wndw_atom *);
int wndwc57e_ilut_clr(struct nv50_wndw *);
int wndwc57e_csc_set(struct nv50_wndw *, struct nv50_wndw_atom *);
int wndwc57e_csc_clr(struct nv50_wndw *);
int wndwc67e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);


@ -80,7 +80,7 @@ wndwc57e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
static int
int
wndwc57e_csc_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@ -98,7 +98,7 @@ wndwc57e_csc_clr(struct nv50_wndw *wndw)
return 0;
}
static int
int
wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@ -111,7 +111,7 @@ wndwc57e_csc_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
static int
int
wndwc57e_ilut_clr(struct nv50_wndw *wndw)
{
struct nvif_push *push = wndw->wndw.push;
@ -124,7 +124,7 @@ wndwc57e_ilut_clr(struct nv50_wndw *wndw)
return 0;
}
static int
int
wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
@ -179,7 +179,7 @@ wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
writew(readw(mem - 4), mem + 4);
}
static bool
bool
wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
{
if (size = size ? size : 1024, size != 256 && size != 1024)


@ -0,0 +1,106 @@
/*
* Copyright 2021 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "wndw.h"
#include "atom.h"
#include <nvif/pushc37b.h>
#include <nvhw/class/clc57e.h>
static int
wndwc67e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
struct nvif_push *push = wndw->wndw.push;
int ret;
if ((ret = PUSH_WAIT(push, 17)))
return ret;
PUSH_MTHD(push, NVC57E, SET_PRESENT_CONTROL,
NVVAL(NVC57E, SET_PRESENT_CONTROL, MIN_PRESENT_INTERVAL, asyw->image.interval) |
NVVAL(NVC57E, SET_PRESENT_CONTROL, BEGIN_MODE, asyw->image.mode) |
NVDEF(NVC57E, SET_PRESENT_CONTROL, TIMESTAMP_MODE, DISABLE));
PUSH_MTHD(push, NVC57E, SET_SIZE,
NVVAL(NVC57E, SET_SIZE, WIDTH, asyw->image.w) |
NVVAL(NVC57E, SET_SIZE, HEIGHT, asyw->image.h),
SET_STORAGE,
NVVAL(NVC57E, SET_STORAGE, BLOCK_HEIGHT, asyw->image.blockh),
SET_PARAMS,
NVVAL(NVC57E, SET_PARAMS, FORMAT, asyw->image.format) |
NVDEF(NVC57E, SET_PARAMS, CLAMP_BEFORE_BLEND, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, SWAP_UV, DISABLE) |
NVDEF(NVC57E, SET_PARAMS, FMT_ROUNDING_MODE, ROUND_TO_NEAREST),
SET_PLANAR_STORAGE(0),
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.blocks[0]) |
NVVAL(NVC57E, SET_PLANAR_STORAGE, PITCH, asyw->image.pitch[0] >> 6));
PUSH_MTHD(push, NVC57E, SET_CONTEXT_DMA_ISO(0), asyw->image.handle, 1);
PUSH_MTHD(push, NVC57E, SET_OFFSET(0), asyw->image.offset[0] >> 8);
PUSH_MTHD(push, NVC57E, SET_POINT_IN(0),
NVVAL(NVC57E, SET_POINT_IN, X, asyw->state.src_x >> 16) |
NVVAL(NVC57E, SET_POINT_IN, Y, asyw->state.src_y >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_IN,
NVVAL(NVC57E, SET_SIZE_IN, WIDTH, asyw->state.src_w >> 16) |
NVVAL(NVC57E, SET_SIZE_IN, HEIGHT, asyw->state.src_h >> 16));
PUSH_MTHD(push, NVC57E, SET_SIZE_OUT,
NVVAL(NVC57E, SET_SIZE_OUT, WIDTH, asyw->state.crtc_w) |
NVVAL(NVC57E, SET_SIZE_OUT, HEIGHT, asyw->state.crtc_h));
return 0;
}
static const struct nv50_wndw_func
wndwc67e = {
.acquire = wndwc37e_acquire,
.release = wndwc37e_release,
.sema_set = wndwc37e_sema_set,
.sema_clr = wndwc37e_sema_clr,
.ntfy_set = wndwc37e_ntfy_set,
.ntfy_clr = wndwc37e_ntfy_clr,
.ntfy_reset = corec37d_ntfy_init,
.ntfy_wait_begun = base507c_ntfy_wait_begun,
.ilut = wndwc57e_ilut,
.ilut_identity = true,
.ilut_size = 1024,
.xlut_set = wndwc57e_ilut_set,
.xlut_clr = wndwc57e_ilut_clr,
.csc = base907c_csc,
.csc_set = wndwc57e_csc_set,
.csc_clr = wndwc57e_csc_clr,
.image_set = wndwc67e_image_set,
.image_clr = wndwc37e_image_clr,
.blend_set = wndwc37e_blend_set,
.update = wndwc37e_update,
};
int
wndwc67e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
s32 oclass, struct nv50_wndw **pwndw)
{
return wndwc37e_new_(&wndwc67e, drm, type, index, oclass, BIT(index >> 1), pwndw);
}


@ -33,6 +33,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
#define NV_DEVICE_INFO_V0_TURING 0x0c
#define NV_DEVICE_INFO_V0_AMPERE 0x0d
__u8 family;
__u8 pad06[2];
__u64 ram_size;


@ -88,6 +88,7 @@
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
#define GA102_DISP /* cl5070.h */ 0x0000c670
#define GV100_DISP_CAPS 0x0000c373
@ -103,6 +104,7 @@
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
#define TU102_DISP_CURSOR /* cl507a.h */ 0x0000c57a
#define GA102_DISP_CURSOR /* cl507a.h */ 0x0000c67a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@ -112,6 +114,7 @@
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
#define TU102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
#define GA102_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c67b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@ -135,6 +138,7 @@
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
#define TU102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
#define GA102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c67d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@ -145,6 +149,7 @@
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
#define TU102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
#define GA102_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c67e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297


@ -120,6 +120,7 @@ struct nvkm_device {
GP100 = 0x130,
GV100 = 0x140,
TU100 = 0x160,
GA100 = 0x170,
} card_type;
u32 chipset;
u8 chiprev;


@ -37,4 +37,5 @@ int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int tu102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int ga102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif


@ -32,4 +32,5 @@ int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int tu102_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int ga100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif


@ -86,6 +86,8 @@ int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>


@ -37,4 +37,5 @@ int nv50_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int g94_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gf119_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int gk104_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
int ga102_gpio_new(struct nvkm_device *, int, struct nvkm_gpio **);
#endif


@ -92,6 +92,7 @@ int g94_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **);
static inline int


@ -32,4 +32,5 @@ int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int tu102_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int ga100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif


@ -256,6 +256,7 @@ nouveau_backlight_init(struct drm_connector *connector)
case NV_DEVICE_INFO_V0_PASCAL:
case NV_DEVICE_INFO_V0_VOLTA:
case NV_DEVICE_INFO_V0_TURING:
case NV_DEVICE_INFO_V0_AMPERE: //XXX: not confirmed
ret = nv50_backlight_init(nv_encoder, &props, &ops);
break;
default:


@ -35,6 +35,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass,
struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
{ GA102_DISP, -1 },
{ TU102_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },


@ -1815,7 +1815,7 @@ nvf0_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -1853,7 +1853,7 @@ nvf1_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -1891,7 +1891,7 @@ nv106_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -1929,7 +1929,7 @@ nv108_chipset = {
.fb = gk110_fb_new,
.fuse = gf100_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -1967,7 +1967,7 @@ nv117_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -2003,7 +2003,7 @@ nv118_chipset = {
.fb = gm107_fb_new,
.fuse = gm107_fuse_new,
.gpio = gk104_gpio_new,
.i2c = gk104_i2c_new,
.i2c = gk110_i2c_new,
.ibus = gk104_ibus_new,
.iccsense = gf100_iccsense_new,
.imem = nv50_instmem_new,
@ -2652,6 +2652,61 @@ nv168_chipset = {
.sec2 = tu102_sec2_new,
};
static const struct nvkm_device_chip
nv170_chipset = {
.name = "GA100",
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.devinit = ga100_devinit_new,
.fb = ga100_fb_new,
.gpio = gk104_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.mc = ga100_mc_new,
.mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.timer = gk20a_timer_new,
};
static const struct nvkm_device_chip
nv172_chipset = {
.name = "GA102",
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.devinit = ga100_devinit_new,
.fb = ga102_fb_new,
.gpio = ga102_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.mc = ga100_mc_new,
.mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.timer = gk20a_timer_new,
.disp = ga102_disp_new,
.dma = gv100_dma_new,
};
static const struct nvkm_device_chip
nv174_chipset = {
.name = "GA104",
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.devinit = ga100_devinit_new,
.fb = ga102_fb_new,
.gpio = ga102_gpio_new,
.i2c = gm200_i2c_new,
.ibus = gm200_ibus_new,
.imem = nv50_instmem_new,
.mc = ga100_mc_new,
.mmu = tu102_mmu_new,
.pci = gp100_pci_new,
.timer = gk20a_timer_new,
.disp = ga102_disp_new,
.dma = gv100_dma_new,
};
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@ -3063,6 +3118,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
case 0x160: device->card_type = TU100; break;
case 0x170: device->card_type = GA100; break;
default:
break;
}
@ -3160,10 +3216,23 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x166: device->chip = &nv166_chipset; break;
case 0x167: device->chip = &nv167_chipset; break;
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
switch (device->chipset) {
case 0x170: device->chip = &nv170_chipset; break;
default:
break;
}
}
if (!device->chip) {
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
ret = -ENODEV;
goto done;
}
break;
}
nvdev_info(device, "NVIDIA %s (%08x)\n",


@ -176,6 +176,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
default:
args->v0.family = 0;
break;

Some files were not shown because too many files have changed in this diff.