commit 8b44836583

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Two easy cases of overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -20,6 +20,8 @@ Required properties:

Optional properties:
- phy-handle: See ethernet.txt file in the same directory.
  If absent, davinci_emac driver defaults to 100/FULL.
- nvmem-cells: phandle, reference to an nvmem node for the MAC address
- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used
- ti,davinci-rmii-en: 1 byte, 1 means use RMII
- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
@@ -10,15 +10,14 @@ Documentation/devicetree/bindings/phy/phy-bindings.txt.
  the boot program; should be used in cases where the MAC address assigned to
  the device by the boot program is different from the "local-mac-address"
  property;
- nvmem-cells: phandle, reference to an nvmem node for the MAC address;
- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used;
- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
  the maximum frame size (there's contradiction in the Devicetree
  Specification).
- phy-mode: string, operation mode of the PHY interface. This is now a de-facto
  standard property; supported values are:
  * "internal"
  * "internal" (Internal means there is not a standard bus between the MAC and
    the PHY, something proprietary is being used to embed the PHY in the MAC.)
  * "mii"
  * "gmii"
  * "sgmii"
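The nvmem-cells / nvmem-cell-names pair added above lets a MAC driver pull its address from an EEPROM or OTP cell instead of a fixed property. As a hedged illustration only (not code from this commit), a consumer driver might fetch the cell through the nvmem API roughly like this; the helper name and error handling are assumptions:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

/* Illustrative sketch: read a 6-byte MAC from the cell named "mac-address",
 * as wired up by the nvmem-cells/nvmem-cell-names properties above. */
static int example_mac_from_nvmem(struct device *dev, u8 *mac)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "mac-address");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len != ETH_ALEN || !is_valid_ether_addr(buf)) {
		kfree(buf);
		return -EINVAL;
	}

	ether_addr_copy(mac, buf);
	kfree(buf);
	return 0;
}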
@@ -26,6 +26,10 @@ Required properties:
  Optional elements: 'tsu_clk'
- clocks: Phandles to input clocks.

Optional properties:
- nvmem-cells: phandle, reference to an nvmem node for the MAC address
- nvmem-cell-names: string, should be "mac-address" if nvmem is to be used

Optional properties for PHY child node:
- reset-gpios : Should specify the gpio for phy reset
- magic-packet : If present, indicates that the hardware supports waking
@@ -623,7 +623,7 @@ the remote via /dev/input/event devices.

    - .. row 78

      - ``KEY_SCREEN``
      - ``KEY_ASPECT_RATIO``

      - Select screen aspect ratio

@@ -631,7 +631,7 @@ the remote via /dev/input/event devices.

    - .. row 79

      - ``KEY_ZOOM``
      - ``KEY_FULL_SCREEN``

      - Put device into zoom/full screen mode
@@ -22,8 +22,6 @@ you'll need the following options as well...
    CONFIG_DECNET_ROUTER (to be able to add/delete routes)
    CONFIG_NETFILTER (will be required for the DECnet routing daemon)

    CONFIG_DECNET_ROUTE_FWMARK is optional

Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
that you need it, in general you won't and it can cause ifconfig to
malfunction.
@@ -427,6 +427,7 @@ tcp_min_rtt_wlen - INTEGER
	minimum RTT when it is moved to a longer path (e.g., due to traffic
	engineering). A longer window makes the filter more resistant to RTT
	inflations such as transient congestion. The unit is seconds.
	Possible values: 0 - 86400 (1 day)
	Default: 300

tcp_moderate_rcvbuf - BOOLEAN
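Put concretely: with the default of 300 the filter only tracks the minimum RTT observed over the last 300 seconds (five minutes), while the documented maximum stretches that window to a full day (24 * 60 * 60 = 86400 seconds); a larger window simply makes the tracked minimum slower to follow a genuine path change.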
@@ -3124,6 +3124,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: linux-mips@vger.kernel.org
T: git git://github.com/broadcom/stblinux.git
S: Maintained

@@ -7336,7 +7337,6 @@ F: Documentation/devicetree/bindings/i3c/
F: Documentation/driver-api/i3c
F: drivers/i3c/
F: include/linux/i3c/
F: include/dt-bindings/i3c/

I3C DRIVER FOR SYNOPSYS DESIGNWARE
M: Vitor Soares <vitor.soares@synopsys.com>

@@ -8711,6 +8711,7 @@ F: scripts/leaking_addresses.pl
LED SUBSYSTEM
M: Jacek Anaszewski <jacek.anaszewski@gmail.com>
M: Pavel Machek <pavel@ucw.cz>
R: Dan Murphy <dmurphy@ti.com>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
S: Maintained
Makefile

@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 1
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Shy Crocodile

# *DOCUMENTATION*
@ -463,3 +463,7 @@
|
||||
532 common getppid sys_getppid
|
||||
# all other architectures have common numbers for new syscall, alpha
|
||||
# is the exception.
|
||||
534 common pidfd_send_signal sys_pidfd_send_signal
|
||||
535 common io_uring_setup sys_io_uring_setup
|
||||
536 common io_uring_enter sys_io_uring_enter
|
||||
537 common io_uring_register sys_io_uring_register
|
||||
|
@ -437,3 +437,7 @@
|
||||
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
|
||||
422 common futex_time64 sys_futex
|
||||
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -50,7 +50,7 @@ do { \
|
||||
static inline int
|
||||
arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
|
||||
{
|
||||
int oldval, ret, tmp;
|
||||
int oldval = 0, ret, tmp;
|
||||
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
|
||||
|
||||
pagefault_disable();
|
||||
|
@ -44,7 +44,7 @@
|
||||
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
|
||||
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
|
||||
|
||||
#define __NR_compat_syscalls 424
|
||||
#define __NR_compat_syscalls 428
|
||||
#endif
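A quick sanity check on the bump above: the commit wires up four new compat syscalls below, pidfd_send_signal (424), io_uring_setup (425), io_uring_enter (426) and io_uring_register (427), and __NR_compat_syscalls has to stay one past the highest wired-up number, so it moves from 424 to 427 + 1 = 428.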
|
||||
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
|
@ -866,6 +866,14 @@ __SYSCALL(__NR_rt_sigtimedwait_time64, compat_sys_rt_sigtimedwait_time64)
|
||||
__SYSCALL(__NR_futex_time64, sys_futex)
|
||||
#define __NR_sched_rr_get_interval_time64 423
|
||||
__SYSCALL(__NR_sched_rr_get_interval_time64, sys_sched_rr_get_interval)
|
||||
#define __NR_pidfd_send_signal 424
|
||||
__SYSCALL(__NR_pidfd_send_signal, sys_pidfd_send_signal)
|
||||
#define __NR_io_uring_setup 425
|
||||
__SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
|
||||
#define __NR_io_uring_enter 426
|
||||
__SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
|
||||
#define __NR_io_uring_register 427
|
||||
__SYSCALL(__NR_io_uring_register, sys_io_uring_register)
|
||||
|
||||
/*
|
||||
* Please add new compat syscalls above this comment and update
|
||||
|
@ -344,3 +344,7 @@
|
||||
332 common pkey_free sys_pkey_free
|
||||
333 common rseq sys_rseq
|
||||
# 334 through 423 are reserved to sync up with other architectures
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -423,3 +423,7 @@
|
||||
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
|
||||
422 common futex_time64 sys_futex
|
||||
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -429,3 +429,7 @@
|
||||
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
|
||||
422 common futex_time64 sys_futex
|
||||
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -210,12 +210,6 @@ const char *get_system_type(void)
|
||||
return ath79_sys_type;
|
||||
}
|
||||
|
||||
int get_c0_perfcount_int(void)
|
||||
{
|
||||
return ATH79_MISC_IRQ(5);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
|
||||
|
||||
unsigned int get_c0_compare_int(void)
|
||||
{
|
||||
return CP0_LEGACY_COMPARE_IRQ;
|
||||
|
@ -125,7 +125,7 @@ trace_a_syscall:
|
||||
subu t1, v0, __NR_O32_Linux
|
||||
move a1, v0
|
||||
bnez t1, 1f /* __NR_syscall at offset 0 */
|
||||
lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
|
||||
ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
|
||||
.set pop
|
||||
|
||||
1: jal syscall_trace_enter
|
||||
|
@ -362,3 +362,7 @@
|
||||
421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64
|
||||
422 n32 futex_time64 sys_futex
|
||||
423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 n32 pidfd_send_signal sys_pidfd_send_signal
|
||||
425 n32 io_uring_setup sys_io_uring_setup
|
||||
426 n32 io_uring_enter sys_io_uring_enter
|
||||
427 n32 io_uring_register sys_io_uring_register
|
||||
|
@ -338,3 +338,7 @@
|
||||
327 n64 rseq sys_rseq
|
||||
328 n64 io_pgetevents sys_io_pgetevents
|
||||
# 329 through 423 are reserved to sync up with other architectures
|
||||
424 n64 pidfd_send_signal sys_pidfd_send_signal
|
||||
425 n64 io_uring_setup sys_io_uring_setup
|
||||
426 n64 io_uring_enter sys_io_uring_enter
|
||||
427 n64 io_uring_register sys_io_uring_register
|
||||
|
@ -411,3 +411,7 @@
|
||||
421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
|
||||
422 o32 futex_time64 sys_futex sys_futex
|
||||
423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
|
||||
424 o32 pidfd_send_signal sys_pidfd_send_signal
|
||||
425 o32 io_uring_setup sys_io_uring_setup
|
||||
426 o32 io_uring_enter sys_io_uring_enter
|
||||
427 o32 io_uring_register sys_io_uring_register
|
||||
|
@ -420,3 +420,7 @@
|
||||
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
|
||||
422 32 futex_time64 sys_futex sys_futex
|
||||
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -505,3 +505,7 @@
|
||||
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
|
||||
422 32 futex_time64 sys_futex sys_futex
|
||||
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@@ -25,7 +25,7 @@ static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

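The fix above matters because IS_ENABLED() only recognizes full CONFIG_ symbols: with the bare BLK_DEV_INITRD token it silently evaluates to 0 and the initrd range is never stepped over. A simplified, user-space sketch of the preprocessor trick behind it (names here are illustrative; the real macros live in include/linux/kconfig.h and also handle =m options):

#include <stdio.h>

/* An enabled bool option is defined to 1 (CONFIG_FOO=y gives -DCONFIG_FOO=1).
 * Pasting that 1 onto PLACEHOLDER_ yields "0,", so the second-argument picker
 * returns 1; any other token leaves a single junk argument and returns 0. */
#define PLACEHOLDER_1			0,
#define take_second_arg(ignored, val, ...)	val
#define is_defined(x)			is_defined_(x)
#define is_defined_(v)			is_defined__(PLACEHOLDER_##v)
#define is_defined__(junk_or_0)		take_second_arg(junk_or_0 1, 0)
#define MY_IS_ENABLED(option)		is_defined(option)

#define CONFIG_BLK_DEV_INITRD	1	/* pretend the option is built in */

int main(void)
{
	printf("%d\n", MY_IS_ENABLED(CONFIG_BLK_DEV_INITRD));	/* prints 1 */
	printf("%d\n", MY_IS_ENABLED(BLK_DEV_INITRD));		/* prints 0 */
	return 0;
}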
@ -23,7 +23,7 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
|
||||
|
||||
if (flags & KERNEL_FPC)
|
||||
/* Save floating point control */
|
||||
asm volatile("stfpc %0" : "=m" (state->fpc));
|
||||
asm volatile("stfpc %0" : "=Q" (state->fpc));
|
||||
|
||||
if (!MACHINE_HAS_VX) {
|
||||
if (flags & KERNEL_VXR_V0V7) {
|
||||
|
@ -426,3 +426,7 @@
|
||||
421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
|
||||
422 32 futex_time64 - sys_futex
|
||||
423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register sys_io_uring_register
|
||||
|
@ -37,7 +37,7 @@ static inline u64 get_vtimer(void)
|
||||
{
|
||||
u64 timer;
|
||||
|
||||
asm volatile("stpt %0" : "=m" (timer));
|
||||
asm volatile("stpt %0" : "=Q" (timer));
|
||||
return timer;
|
||||
}
|
||||
|
||||
@ -48,7 +48,7 @@ static inline void set_vtimer(u64 expires)
|
||||
asm volatile(
|
||||
" stpt %0\n" /* Store current cpu timer value */
|
||||
" spt %1" /* Set new value imm. afterwards */
|
||||
: "=m" (timer) : "m" (expires));
|
||||
: "=Q" (timer) : "Q" (expires));
|
||||
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
|
||||
S390_lowcore.last_update_timer = expires;
|
||||
}
|
||||
@ -135,8 +135,8 @@ static int do_account_vtime(struct task_struct *tsk)
|
||||
#else
|
||||
" stck %1" /* Store current tod clock value */
|
||||
#endif
|
||||
: "=m" (S390_lowcore.last_update_timer),
|
||||
"=m" (S390_lowcore.last_update_clock));
|
||||
: "=Q" (S390_lowcore.last_update_timer),
|
||||
"=Q" (S390_lowcore.last_update_clock));
|
||||
clock = S390_lowcore.last_update_clock - clock;
|
||||
timer -= S390_lowcore.last_update_timer;
|
||||
|
||||
|
@ -426,3 +426,7 @@
|
||||
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
|
||||
422 common futex_time64 sys_futex
|
||||
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -469,3 +469,7 @@
|
||||
421 32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
|
||||
422 32 futex_time64 sys_futex sys_futex
|
||||
423 32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -1499,7 +1499,7 @@ config X86_CPA_STATISTICS
|
||||
depends on DEBUG_FS
|
||||
---help---
|
||||
Expose statistics about the Change Page Attribute mechanims, which
|
||||
helps to determine the effectivness of preserving large and huge
|
||||
helps to determine the effectiveness of preserving large and huge
|
||||
page mappings when mapping protections are changed.
|
||||
|
||||
config ARCH_HAS_MEM_ENCRYPT
|
||||
|
@ -323,6 +323,12 @@ ENTRY(poly1305_4block_avx2)
|
||||
vpaddq t2,t1,t1
|
||||
vmovq t1x,d4
|
||||
|
||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
||||
# integers. It's true in a single-block implementation, but not here.
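For readers following the assembly, here is a hedged C sketch of that same partial reduction on the five 26-bit limbs (variable and function names are illustrative, not from the kernel); the point of the fix is that the d0..d4 carries and the (d4 >> 26) * 5 fold-back must stay in 64-bit temporaries:

#include <stdint.h>

/* Partially reduce h = h0 + 2^26*h1 + 2^52*h2 + 2^78*h3 + 2^104*h4
 * modulo 2^130 - 5, given unreduced 64-bit limb sums d0..d4.
 * Afterwards h0,h2,h3,h4 < 2^26 and h1 < 2^26 plus a small excess. */
static void poly1305_partial_reduce(uint64_t d[5], uint32_t h[5])
{
	uint64_t h0;

	d[1] += d[0] >> 26;		/* carry d0 -> d1 */
	d[2] += d[1] >> 26;		/* carry d1 -> d2 */
	d[3] += d[2] >> 26;		/* carry d2 -> d3 */
	d[4] += d[3] >> 26;		/* carry d3 -> d4 */

	h0   = d[0] & 0x3ffffff;
	h[1] = d[1] & 0x3ffffff;
	h[2] = d[2] & 0x3ffffff;
	h[3] = d[3] & 0x3ffffff;
	h[4] = d[4] & 0x3ffffff;

	/* 2^130 == 5 (mod 2^130 - 5), so the top carry folds back times 5.
	 * Keep it in 64 bits: for multi-block sums it can exceed 32 bits,
	 * which is exactly the overflow this patch guards against. */
	h0   += (d[4] >> 26) * 5;
	h[1] += (uint32_t)(h0 >> 26);	/* final carry h0 -> h1 */
	h[0]  = (uint32_t)(h0 & 0x3ffffff);
}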
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
@ -361,16 +367,16 @@ ENTRY(poly1305_4block_avx2)
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
|
@ -253,16 +253,16 @@ ENTRY(poly1305_block_sse2)
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
@ -524,6 +524,12 @@ ENTRY(poly1305_2block_sse2)
|
||||
paddq t2,t1
|
||||
movq t1,d4
|
||||
|
||||
# Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 ->
|
||||
# h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small
|
||||
# amount. Careful: we must not assume the carry bits 'd0 >> 26',
|
||||
# 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit
|
||||
# integers. It's true in a single-block implementation, but not here.
|
||||
|
||||
# d1 += d0 >> 26
|
||||
mov d0,%rax
|
||||
shr $26,%rax
|
||||
@ -562,16 +568,16 @@ ENTRY(poly1305_2block_sse2)
|
||||
# h0 += (d4 >> 26) * 5
|
||||
mov d4,%rax
|
||||
shr $26,%rax
|
||||
lea (%eax,%eax,4),%eax
|
||||
add %eax,%ebx
|
||||
lea (%rax,%rax,4),%rax
|
||||
add %rax,%rbx
|
||||
# h4 = d4 & 0x3ffffff
|
||||
mov d4,%rax
|
||||
and $0x3ffffff,%eax
|
||||
mov %eax,h4
|
||||
|
||||
# h1 += h0 >> 26
|
||||
mov %ebx,%eax
|
||||
shr $26,%eax
|
||||
mov %rbx,%rax
|
||||
shr $26,%rax
|
||||
add %eax,h1
|
||||
# h0 = h0 & 0x3ffffff
|
||||
andl $0x3ffffff,%ebx
|
||||
|
@ -117,22 +117,39 @@ static __initconst const u64 amd_hw_cache_event_ids
|
||||
};
|
||||
|
||||
/*
|
||||
* AMD Performance Monitor K7 and later.
|
||||
* AMD Performance Monitor K7 and later, up to and including Family 16h:
|
||||
*/
|
||||
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
|
||||
{
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x077d,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x077e,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
|
||||
};
|
||||
|
||||
/*
|
||||
* AMD Performance Monitor Family 17h and later:
|
||||
*/
|
||||
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
|
||||
{
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x0187,
|
||||
};
|
||||
|
||||
static u64 amd_pmu_event_map(int hw_event)
|
||||
{
|
||||
if (boot_cpu_data.x86 >= 0x17)
|
||||
return amd_f17h_perfmon_event_map[hw_event];
|
||||
|
||||
return amd_perfmon_event_map[hw_event];
|
||||
}
|
||||
|
||||
|
@ -3131,7 +3131,7 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
|
||||
flags &= ~PERF_SAMPLE_TIME;
|
||||
if (!event->attr.exclude_kernel)
|
||||
flags &= ~PERF_SAMPLE_REGS_USER;
|
||||
if (event->attr.sample_regs_user & ~PEBS_REGS)
|
||||
if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
|
||||
flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
|
||||
return flags;
|
||||
}
|
||||
|
@ -96,25 +96,25 @@ struct amd_nb {
|
||||
PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER | \
|
||||
PERF_SAMPLE_PERIOD)
|
||||
|
||||
#define PEBS_REGS \
|
||||
(PERF_REG_X86_AX | \
|
||||
PERF_REG_X86_BX | \
|
||||
PERF_REG_X86_CX | \
|
||||
PERF_REG_X86_DX | \
|
||||
PERF_REG_X86_DI | \
|
||||
PERF_REG_X86_SI | \
|
||||
PERF_REG_X86_SP | \
|
||||
PERF_REG_X86_BP | \
|
||||
PERF_REG_X86_IP | \
|
||||
PERF_REG_X86_FLAGS | \
|
||||
PERF_REG_X86_R8 | \
|
||||
PERF_REG_X86_R9 | \
|
||||
PERF_REG_X86_R10 | \
|
||||
PERF_REG_X86_R11 | \
|
||||
PERF_REG_X86_R12 | \
|
||||
PERF_REG_X86_R13 | \
|
||||
PERF_REG_X86_R14 | \
|
||||
PERF_REG_X86_R15)
|
||||
#define PEBS_GP_REGS \
|
||||
((1ULL << PERF_REG_X86_AX) | \
|
||||
(1ULL << PERF_REG_X86_BX) | \
|
||||
(1ULL << PERF_REG_X86_CX) | \
|
||||
(1ULL << PERF_REG_X86_DX) | \
|
||||
(1ULL << PERF_REG_X86_DI) | \
|
||||
(1ULL << PERF_REG_X86_SI) | \
|
||||
(1ULL << PERF_REG_X86_SP) | \
|
||||
(1ULL << PERF_REG_X86_BP) | \
|
||||
(1ULL << PERF_REG_X86_IP) | \
|
||||
(1ULL << PERF_REG_X86_FLAGS) | \
|
||||
(1ULL << PERF_REG_X86_R8) | \
|
||||
(1ULL << PERF_REG_X86_R9) | \
|
||||
(1ULL << PERF_REG_X86_R10) | \
|
||||
(1ULL << PERF_REG_X86_R11) | \
|
||||
(1ULL << PERF_REG_X86_R12) | \
|
||||
(1ULL << PERF_REG_X86_R13) | \
|
||||
(1ULL << PERF_REG_X86_R14) | \
|
||||
(1ULL << PERF_REG_X86_R15))
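The switch from PEBS_REGS to PEBS_GP_REGS above is more than a rename: the old macro OR'd the PERF_REG_X86_* enum values themselves, which are small indices rather than single-bit flags, so masking a sample_regs_user bitmap against it gave meaningless results. A small self-contained illustration (the enum values 0..3 are assumed for the sketch, not quoted from the uapi header):

#include <stdint.h>
#include <stdio.h>

enum { REG_AX = 0, REG_BX = 1, REG_CX = 2, REG_DX = 3 };	/* assumed indices */

int main(void)
{
	/* Old style: OR the indices themselves -> 0|1|2|3 == 3, which only
	 * "covers" bits 0 and 1 and is not a usable register bitmask. */
	uint64_t old_style = REG_AX | REG_BX | REG_CX | REG_DX;

	/* New style: one bit per register index, as PEBS_GP_REGS now does. */
	uint64_t new_style = (1ULL << REG_AX) | (1ULL << REG_BX) |
			     (1ULL << REG_CX) | (1ULL << REG_DX);

	uint64_t sample_regs_user = 1ULL << REG_DX;	/* caller asked for DX */

	/* Mirrors the "sample_regs_user & ~PEBS_GP_REGS" test in the patch. */
	printf("old mask wrongly rejects DX: %d\n",
	       (sample_regs_user & ~old_style) != 0);	/* 1 */
	printf("new mask accepts DX:         %d\n",
	       (sample_regs_user & ~new_style) == 0);	/* 1 */
	return 0;
}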
|
||||
|
||||
/*
|
||||
* Per register state.
|
||||
|
@ -275,7 +275,7 @@ static const struct {
|
||||
const char *option;
|
||||
enum spectre_v2_user_cmd cmd;
|
||||
bool secure;
|
||||
} v2_user_options[] __initdata = {
|
||||
} v2_user_options[] __initconst = {
|
||||
{ "auto", SPECTRE_V2_USER_CMD_AUTO, false },
|
||||
{ "off", SPECTRE_V2_USER_CMD_NONE, false },
|
||||
{ "on", SPECTRE_V2_USER_CMD_FORCE, true },
|
||||
@ -419,7 +419,7 @@ static const struct {
|
||||
const char *option;
|
||||
enum spectre_v2_mitigation_cmd cmd;
|
||||
bool secure;
|
||||
} mitigation_options[] __initdata = {
|
||||
} mitigation_options[] __initconst = {
|
||||
{ "off", SPECTRE_V2_CMD_NONE, false },
|
||||
{ "on", SPECTRE_V2_CMD_FORCE, true },
|
||||
{ "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
|
||||
@ -658,7 +658,7 @@ static const char * const ssb_strings[] = {
|
||||
static const struct {
|
||||
const char *option;
|
||||
enum ssb_mitigation_cmd cmd;
|
||||
} ssb_mitigation_options[] __initdata = {
|
||||
} ssb_mitigation_options[] __initconst = {
|
||||
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
|
||||
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
|
||||
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
|
||||
|
@ -611,8 +611,8 @@ static void init_intel_energy_perf(struct cpuinfo_x86 *c)
|
||||
if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
|
||||
return;
|
||||
|
||||
pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
|
||||
pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
|
||||
pr_info_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
|
||||
pr_info_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
|
||||
epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
|
||||
wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
|
||||
}
|
||||
|
@ -2610,9 +2610,10 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
|
||||
rdt_last_cmd_puts("Failed to initialize allocations\n");
|
||||
return ret;
|
||||
}
|
||||
rdtgrp->mode = RDT_MODE_SHAREABLE;
|
||||
}
|
||||
|
||||
rdtgrp->mode = RDT_MODE_SHAREABLE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -569,6 +569,7 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
|
||||
unsigned long *sara = stack_addr(regs);
|
||||
|
||||
ri->ret_addr = (kprobe_opcode_t *) *sara;
|
||||
ri->fp = sara;
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
*sara = (unsigned long) &kretprobe_trampoline;
|
||||
@ -748,26 +749,48 @@ asm(
|
||||
NOKPROBE_SYMBOL(kretprobe_trampoline);
|
||||
STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
|
||||
|
||||
static struct kprobe kretprobe_kprobe = {
|
||||
.addr = (void *)kretprobe_trampoline,
|
||||
};
|
||||
|
||||
/*
|
||||
* Called from kretprobe_trampoline
|
||||
*/
|
||||
static __used void *trampoline_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb;
|
||||
struct kretprobe_instance *ri = NULL;
|
||||
struct hlist_head *head, empty_rp;
|
||||
struct hlist_node *tmp;
|
||||
unsigned long flags, orig_ret_address = 0;
|
||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||
kprobe_opcode_t *correct_ret_addr = NULL;
|
||||
void *frame_pointer;
|
||||
bool skipped = false;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
/*
|
||||
* Set a dummy kprobe for avoiding kretprobe recursion.
|
||||
* Since kretprobe never run in kprobe handler, kprobe must not
|
||||
* be running at this point.
|
||||
*/
|
||||
kcb = get_kprobe_ctlblk();
|
||||
__this_cpu_write(current_kprobe, &kretprobe_kprobe);
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
|
||||
INIT_HLIST_HEAD(&empty_rp);
|
||||
kretprobe_hash_lock(current, &head, &flags);
|
||||
/* fixup registers */
|
||||
#ifdef CONFIG_X86_64
|
||||
regs->cs = __KERNEL_CS;
|
||||
/* On x86-64, we use pt_regs->sp for return address holder. */
|
||||
frame_pointer = &regs->sp;
|
||||
#else
|
||||
regs->cs = __KERNEL_CS | get_kernel_rpl();
|
||||
regs->gs = 0;
|
||||
/* On x86-32, we use pt_regs->flags for return address holder. */
|
||||
frame_pointer = &regs->flags;
|
||||
#endif
|
||||
regs->ip = trampoline_address;
|
||||
regs->orig_ax = ~0UL;
|
||||
@ -789,8 +812,25 @@ static __used void *trampoline_handler(struct pt_regs *regs)
|
||||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
/*
|
||||
* Return probes must be pushed on this hash list correct
|
||||
* order (same as return order) so that it can be poped
|
||||
* correctly. However, if we find it is pushed it incorrect
|
||||
* order, this means we find a function which should not be
|
||||
* probed, because the wrong order entry is pushed on the
|
||||
* path of processing other kretprobe itself.
|
||||
*/
|
||||
if (ri->fp != frame_pointer) {
|
||||
if (!skipped)
|
||||
pr_warn("kretprobe is stacked incorrectly. Trying to fixup.\n");
|
||||
skipped = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (skipped)
|
||||
pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
|
||||
ri->rp->kp.addr);
|
||||
|
||||
if (orig_ret_address != trampoline_address)
|
||||
/*
|
||||
@ -808,14 +848,15 @@ static __used void *trampoline_handler(struct pt_regs *regs)
|
||||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
if (ri->fp != frame_pointer)
|
||||
continue;
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
if (ri->rp && ri->rp->handler) {
|
||||
__this_cpu_write(current_kprobe, &ri->rp->kp);
|
||||
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
ri->ret_addr = correct_ret_addr;
|
||||
ri->rp->handler(ri, regs);
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
__this_cpu_write(current_kprobe, &kretprobe_kprobe);
|
||||
}
|
||||
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
@ -831,6 +872,9 @@ static __used void *trampoline_handler(struct pt_regs *regs)
|
||||
|
||||
kretprobe_hash_unlock(current, &flags);
|
||||
|
||||
__this_cpu_write(current_kprobe, NULL);
|
||||
preempt_enable();
|
||||
|
||||
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
|
||||
hlist_del(&ri->hlist);
|
||||
kfree(ri);
|
||||
|
@ -426,6 +426,8 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
|
||||
u64 msr = x86_spec_ctrl_base;
|
||||
bool updmsr = false;
|
||||
|
||||
lockdep_assert_irqs_disabled();
|
||||
|
||||
/*
|
||||
* If TIF_SSBD is different, select the proper mitigation
|
||||
* method. Note that if SSBD mitigation is disabled or permanentely
|
||||
@ -477,10 +479,12 @@ static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
|
||||
|
||||
void speculation_ctrl_update(unsigned long tif)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* Forced update. Make sure all relevant TIF flags are different */
|
||||
preempt_disable();
|
||||
local_irq_save(flags);
|
||||
__speculation_ctrl_update(~tif, tif);
|
||||
preempt_enable();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/* Called from seccomp/prctl update */
|
||||
|
@ -81,6 +81,19 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some machines don't handle the default ACPI reboot method and
|
||||
* require the EFI reboot method:
|
||||
*/
|
||||
static int __init set_efi_reboot(const struct dmi_system_id *d)
|
||||
{
|
||||
if (reboot_type != BOOT_EFI && !efi_runtime_disabled()) {
|
||||
reboot_type = BOOT_EFI;
|
||||
pr_info("%s series board detected. Selecting EFI-method for reboot.\n", d->ident);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __noreturn machine_real_restart(unsigned int type)
|
||||
{
|
||||
local_irq_disable();
|
||||
@ -166,6 +179,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
|
||||
},
|
||||
},
|
||||
{ /* Handle reboot issue on Acer TravelMate X514-51T */
|
||||
.callback = set_efi_reboot,
|
||||
.ident = "Acer TravelMate X514-51T",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate X514-51T"),
|
||||
},
|
||||
},
|
||||
|
||||
/* Apple */
|
||||
{ /* Handle problems with rebooting on Apple MacBook5 */
|
||||
|
@ -362,7 +362,7 @@ SECTIONS
|
||||
.bss : AT(ADDR(.bss) - LOAD_OFFSET) {
|
||||
__bss_start = .;
|
||||
*(.bss..page_aligned)
|
||||
*(.bss)
|
||||
*(BSS_MAIN)
|
||||
BSS_DECRYPTED
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
__bss_stop = .;
|
||||
|
@ -259,7 +259,8 @@ static void note_wx(struct pg_state *st)
|
||||
#endif
|
||||
/* Account the WX pages */
|
||||
st->wx_pages += npages;
|
||||
WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %pS\n",
|
||||
WARN_ONCE(__supported_pte_mask & _PAGE_NX,
|
||||
"x86/mm: Found insecure W+X mapping at address %pS\n",
|
||||
(void *)st->start_address);
|
||||
}
|
||||
|
||||
|
@ -825,7 +825,7 @@ void __init __early_set_fixmap(enum fixed_addresses idx,
|
||||
pte = early_ioremap_pte(addr);
|
||||
|
||||
/* Sanitize 'prot' against any unsupported bits: */
|
||||
pgprot_val(flags) &= __default_kernel_pte_mask;
|
||||
pgprot_val(flags) &= __supported_pte_mask;
|
||||
|
||||
if (pgprot_val(flags))
|
||||
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
||||
|
@ -94,7 +94,7 @@ void __init kernel_randomize_memory(void)
|
||||
if (!kaslr_memory_enabled())
|
||||
return;
|
||||
|
||||
kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT);
|
||||
kaslr_regions[0].size_tb = 1 << (MAX_PHYSMEM_BITS - TB_SHIFT);
|
||||
kaslr_regions[1].size_tb = VMALLOC_SIZE_TB;
|
||||
|
||||
/*
|
||||
|
@ -728,7 +728,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
|
||||
{
|
||||
int cpu;
|
||||
|
||||
struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = {
|
||||
struct flush_tlb_info info = {
|
||||
.mm = mm,
|
||||
.stride_shift = stride_shift,
|
||||
.freed_tables = freed_tables,
|
||||
|
@ -394,3 +394,7 @@
|
||||
421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
|
||||
422 common futex_time64 sys_futex
|
||||
423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
|
||||
424 common pidfd_send_signal sys_pidfd_send_signal
|
||||
425 common io_uring_setup sys_io_uring_setup
|
||||
426 common io_uring_enter sys_io_uring_enter
|
||||
427 common io_uring_register sys_io_uring_register
|
||||
|
@ -5396,7 +5396,7 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
|
||||
return min_shallow;
|
||||
}
|
||||
|
||||
static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
|
||||
static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
|
||||
{
|
||||
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
|
||||
struct blk_mq_tags *tags = hctx->sched_tags;
|
||||
@ -5404,6 +5404,11 @@ static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
|
||||
|
||||
min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
|
||||
sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
|
||||
}
|
||||
|
||||
static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
|
||||
{
|
||||
bfq_depth_updated(hctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -5826,6 +5831,7 @@ static struct elevator_type iosched_bfq_mq = {
|
||||
.requests_merged = bfq_requests_merged,
|
||||
.request_merged = bfq_request_merged,
|
||||
.has_work = bfq_has_work,
|
||||
.depth_updated = bfq_depth_updated,
|
||||
.init_hctx = bfq_init_hctx,
|
||||
.init_sched = bfq_init_queue,
|
||||
.exit_sched = bfq_exit_queue,
|
||||
|
@ -3135,6 +3135,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
|
||||
}
|
||||
if (ret)
|
||||
break;
|
||||
if (q->elevator && q->elevator->type->ops.depth_updated)
|
||||
q->elevator->type->ops.depth_updated(hctx);
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
|
@ -5634,7 +5634,49 @@ static const struct hash_testvec poly1305_tv_template[] = {
|
||||
.psize = 80,
|
||||
.digest = "\x13\x00\x00\x00\x00\x00\x00\x00"
|
||||
"\x00\x00\x00\x00\x00\x00\x00\x00",
|
||||
},
|
||||
}, { /* Regression test for overflow in AVX2 implementation */
|
||||
.plaintext = "\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff\xff\xff\xff\xff"
|
||||
"\xff\xff\xff\xff",
|
||||
.psize = 300,
|
||||
.digest = "\xfb\x5e\x96\xd8\x61\xd5\xc7\xc8"
|
||||
"\x78\xe5\x87\xcc\x2d\x5a\x22\xe1",
|
||||
}
|
||||
};
|
||||
|
||||
/* NHPoly1305 test vectors from https://github.com/google/adiantum */
|
||||
|
@ -1646,7 +1646,7 @@ static irqreturn_t fs_irq (int irq, void *dev_id)
|
||||
}
|
||||
|
||||
if (status & ISR_TBRQ_W) {
|
||||
fs_dprintk (FS_DEBUG_IRQ, "Data tramsitted!\n");
|
||||
fs_dprintk (FS_DEBUG_IRQ, "Data transmitted!\n");
|
||||
process_txdone_queue (dev, &dev->tx_relq);
|
||||
}
|
||||
|
||||
|
@ -506,7 +506,7 @@ static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
|
||||
|
||||
ret = lock_device_hotplug_sysfs();
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
|
||||
nid = memory_add_physaddr_to_nid(phys_addr);
|
||||
ret = __add_memory(nid, phys_addr,
|
||||
|
@ -145,6 +145,7 @@ config VT8500_TIMER
|
||||
config NPCM7XX_TIMER
|
||||
bool "NPCM7xx timer driver" if COMPILE_TEST
|
||||
depends on HAS_IOMEM
|
||||
select TIMER_OF
|
||||
select CLKSRC_MMIO
|
||||
help
|
||||
Enable 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
|
||||
|
@ -9,7 +9,7 @@
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "arm_arch_timer: " fmt
|
||||
#define pr_fmt(fmt) "arch_timer: " fmt
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
@ -33,9 +33,6 @@
|
||||
|
||||
#include <clocksource/arm_arch_timer.h>
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "arch_timer: " fmt
|
||||
|
||||
#define CNTTIDR 0x08
|
||||
#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
|
||||
|
||||
|
@ -296,4 +296,4 @@ err_alloc:
|
||||
TIMER_OF_DECLARE(ox810se_rps,
|
||||
"oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
|
||||
TIMER_OF_DECLARE(ox820_rps,
|
||||
"oxsemi,ox820se-rps-timer", oxnas_rps_timer_init);
|
||||
"oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
|
||||
|
@ -585,34 +585,6 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Optimized set_load which removes costly spin wait in timer_start */
|
||||
static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
|
||||
int autoreload, unsigned int load)
|
||||
{
|
||||
u32 l;
|
||||
|
||||
if (unlikely(!timer))
|
||||
return -EINVAL;
|
||||
|
||||
omap_dm_timer_enable(timer);
|
||||
|
||||
l = omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
|
||||
if (autoreload) {
|
||||
l |= OMAP_TIMER_CTRL_AR;
|
||||
omap_dm_timer_write_reg(timer, OMAP_TIMER_LOAD_REG, load);
|
||||
} else {
|
||||
l &= ~OMAP_TIMER_CTRL_AR;
|
||||
}
|
||||
l |= OMAP_TIMER_CTRL_ST;
|
||||
|
||||
__omap_dm_timer_load_start(timer, l, load, timer->posted);
|
||||
|
||||
/* Save the context */
|
||||
timer->context.tclr = l;
|
||||
timer->context.tldr = load;
|
||||
timer->context.tcrr = load;
|
||||
return 0;
|
||||
}
|
||||
static int omap_dm_timer_set_match(struct omap_dm_timer *timer, int enable,
|
||||
unsigned int match)
|
||||
{
|
||||
|
@ -116,7 +116,7 @@ config EXTCON_PALMAS
|
||||
|
||||
config EXTCON_PTN5150
|
||||
tristate "NXP PTN5150 CC LOGIC USB EXTCON support"
|
||||
depends on I2C && GPIOLIB || COMPILE_TEST
|
||||
depends on I2C && (GPIOLIB || COMPILE_TEST)
|
||||
select REGMAP_I2C
|
||||
help
|
||||
Say Y here to enable support for USB peripheral and USB host
|
||||
|
@ -3165,6 +3165,7 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
|
||||
|
||||
/* No need to recover an evicted BO */
|
||||
if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
|
||||
shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
|
||||
shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
|
||||
continue;
|
||||
|
||||
|
@ -182,6 +182,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
|
||||
L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
|
||||
}
|
||||
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
|
||||
|
||||
tmp = mmVM_L2_CNTL4_DEFAULT;
|
||||
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
|
||||
|
@ -1377,6 +1377,11 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
|
||||
return UPDATE_TYPE_FULL;
|
||||
}
|
||||
|
||||
if (u->surface->force_full_update) {
|
||||
update_flags->bits.full_update = 1;
|
||||
return UPDATE_TYPE_FULL;
|
||||
}
|
||||
|
||||
type = get_plane_info_update_type(u);
|
||||
elevate_update_type(&overall_type, type);
|
||||
|
||||
@ -1802,6 +1807,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
}
|
||||
|
||||
dc_resource_state_copy_construct(state, context);
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
|
||||
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
|
||||
new_pipe->plane_state->force_full_update = true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1838,6 +1851,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
|
||||
dc->current_state = context;
|
||||
dc_release_state(old);
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
|
||||
|
||||
if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
|
||||
pipe_ctx->plane_state->force_full_update = false;
|
||||
}
|
||||
}
|
||||
/*let's use current_state to update watermark etc*/
|
||||
if (update_type >= UPDATE_TYPE_FULL)
|
||||
|
@ -503,6 +503,9 @@ struct dc_plane_state {
|
||||
struct dc_plane_status status;
|
||||
struct dc_context *ctx;
|
||||
|
||||
/* HACK: Workaround for forcing full reprogramming under some conditions */
|
||||
bool force_full_update;
|
||||
|
||||
/* private to dc_surface.c */
|
||||
enum dc_irq_source irq_source;
|
||||
struct kref refcount;
|
||||
|
@ -190,6 +190,12 @@ static void submit_channel_request(
|
||||
1,
|
||||
0);
|
||||
}
|
||||
|
||||
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
|
||||
|
||||
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
|
||||
10, aux110->timeout_period/10);
|
||||
|
||||
/* set the delay and the number of bytes to write */
|
||||
|
||||
/* The length include
|
||||
@ -242,9 +248,6 @@ static void submit_channel_request(
|
||||
}
|
||||
}
|
||||
|
||||
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
|
||||
REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
|
||||
10, aux110->timeout_period/10);
|
||||
REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
|
||||
}
|
||||
|
||||
|
@ -71,11 +71,11 @@ enum { /* This is the timeout as defined in DP 1.2a,
|
||||
* at most within ~240usec. That means,
|
||||
* increasing this timeout will not affect normal operation,
|
||||
* and we'll timeout after
|
||||
* SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
|
||||
* SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
|
||||
* This timeout is especially important for
|
||||
* resume from S3 and CTS.
|
||||
* converters, resume from S3, and CTS.
|
||||
*/
|
||||
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
|
||||
SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
|
||||
};
|
||||
|
||||
struct dce_aux {
|
||||
|
@ -338,8 +338,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
||||
bool *enabled, int width, int height)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
|
||||
unsigned long conn_configured, conn_seq, mask;
|
||||
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
|
||||
unsigned long conn_configured, conn_seq;
|
||||
int i, j;
|
||||
bool *save_enabled;
|
||||
bool fallback = true, ret = true;
|
||||
@ -357,9 +357,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
||||
drm_modeset_backoff(&ctx);
|
||||
|
||||
memcpy(save_enabled, enabled, count);
|
||||
conn_seq = GENMASK(count - 1, 0);
|
||||
mask = GENMASK(count - 1, 0);
|
||||
conn_configured = 0;
|
||||
retry:
|
||||
conn_seq = conn_configured;
|
||||
for (i = 0; i < count; i++) {
|
||||
struct drm_fb_helper_connector *fb_conn;
|
||||
struct drm_connector *connector;
|
||||
@ -372,8 +373,7 @@ retry:
|
||||
if (conn_configured & BIT(i))
|
||||
continue;
|
||||
|
||||
/* First pass, only consider tiled connectors */
|
||||
if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
|
||||
if (conn_seq == 0 && !connector->has_tile)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected)
|
||||
@ -477,10 +477,8 @@ retry:
|
||||
conn_configured |= BIT(i);
|
||||
}
|
||||
|
||||
if (conn_configured != conn_seq) { /* repeat until no more are found */
|
||||
conn_seq = conn_configured;
|
||||
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the BIOS didn't enable everything it could, fall back to have the
|
||||
|
@ -1260,9 +1260,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
|
||||
|
||||
hdmi->dvi = !tegra_output_is_hdmi(output);
|
||||
if (!hdmi->dvi) {
|
||||
err = tegra_hdmi_setup_audio(hdmi);
|
||||
if (err < 0)
|
||||
hdmi->dvi = true;
|
||||
/*
|
||||
* Make sure that the audio format has been configured before
|
||||
* enabling audio, otherwise we may try to divide by zero.
|
||||
*/
|
||||
if (hdmi->format.sample_rate > 0) {
|
||||
err = tegra_hdmi_setup_audio(hdmi);
|
||||
if (err < 0)
|
||||
hdmi->dvi = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (hdmi->config->has_hda)
|
||||
|
@ -876,8 +876,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
|
||||
reservation_object_add_shared_fence(bo->resv, fence);
|
||||
|
||||
ret = reservation_object_reserve_shared(bo->resv, 1);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
dma_fence_put(fence);
|
||||
return ret;
|
||||
}
|
||||
|
||||
dma_fence_put(bo->moving);
|
||||
bo->moving = fence;
|
||||
|
@ -730,9 +730,10 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||
if (!(flags & TTM_PAGE_FLAG_DMA32)) {
|
||||
for (j = 0; j < HPAGE_PMD_NR; ++j)
|
||||
if (p++ != pages[i + j])
|
||||
if (!(flags & TTM_PAGE_FLAG_DMA32) &&
|
||||
(npages - i) >= HPAGE_PMD_NR) {
|
||||
for (j = 1; j < HPAGE_PMD_NR; ++j)
|
||||
if (++p != pages[i + j])
|
||||
break;
|
||||
|
||||
if (j == HPAGE_PMD_NR)
|
||||
@ -759,15 +760,15 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
|
||||
unsigned max_size, n2free;
|
||||
|
||||
spin_lock_irqsave(&huge->lock, irq_flags);
|
||||
while (i < npages) {
|
||||
while ((npages - i) >= HPAGE_PMD_NR) {
|
||||
struct page *p = pages[i];
|
||||
unsigned j;
|
||||
|
||||
if (!p)
|
||||
break;
|
||||
|
||||
for (j = 0; j < HPAGE_PMD_NR; ++j)
|
||||
if (p++ != pages[i + j])
|
||||
for (j = 1; j < HPAGE_PMD_NR; ++j)
|
||||
if (++p != pages[i + j])
|
||||
break;
|
||||
|
||||
if (j != HPAGE_PMD_NR)
|
||||
|
@ -205,10 +205,14 @@ static struct drm_driver driver = {
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
.debugfs_init = virtio_gpu_debugfs_init,
|
||||
#endif
|
||||
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
|
||||
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
|
||||
.gem_prime_export = drm_gem_prime_export,
|
||||
.gem_prime_import = drm_gem_prime_import,
|
||||
.gem_prime_pin = virtgpu_gem_prime_pin,
|
||||
.gem_prime_unpin = virtgpu_gem_prime_unpin,
|
||||
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
|
||||
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
|
||||
.gem_prime_vmap = virtgpu_gem_prime_vmap,
|
||||
.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
|
||||
.gem_prime_mmap = virtgpu_gem_prime_mmap,
|
||||
|
@ -354,6 +354,10 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
|
||||
/* virtgpu_prime.c */
|
||||
int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
|
||||
void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
|
||||
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
|
||||
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
|
||||
struct drm_device *dev, struct dma_buf_attachment *attach,
|
||||
struct sg_table *sgt);
|
||||
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
|
||||
void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
|
||||
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
|
||||
|
@ -39,6 +39,18 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
|
||||
WARN_ONCE(1, "not implemented");
|
||||
}
|
||||
|
||||
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
|
||||
struct drm_device *dev, struct dma_buf_attachment *attach,
|
||||
struct sg_table *table)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
|
||||
{
|
||||
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
|
||||
|
@ -114,9 +114,13 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
|
||||
|
||||
static void host1x_channel_set_streamid(struct host1x_channel *channel)
|
||||
{
|
||||
#if IS_ENABLED(CONFIG_IOMMU_API) && HOST1X_HW >= 6
|
||||
#if HOST1X_HW >= 6
|
||||
u32 sid = 0x7f;
|
||||
#ifdef CONFIG_IOMMU_API
|
||||
struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
|
||||
u32 sid = spec ? spec->ids[0] & 0xffff : 0x7f;
|
||||
if (spec)
|
||||
sid = spec->ids[0] & 0xffff;
|
||||
#endif
|
||||
|
||||
host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
|
||||
#endif
|
||||
|
@ -680,6 +680,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
|
||||
break;
|
||||
}
|
||||
|
||||
if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
|
||||
switch (usage->hid & 0xf) {
|
||||
case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
|
||||
default: goto ignore;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some lazy vendors declare 255 usages for System Control,
|
||||
* leading to the creation of ABS_X|Y axis and too many others.
|
||||
@ -902,7 +910,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
|
||||
case 0x06a: map_key_clear(KEY_GREEN); break;
|
||||
case 0x06b: map_key_clear(KEY_BLUE); break;
|
||||
case 0x06c: map_key_clear(KEY_YELLOW); break;
|
||||
case 0x06d: map_key_clear(KEY_ZOOM); break;
|
||||
case 0x06d: map_key_clear(KEY_ASPECT_RATIO); break;
|
||||
|
||||
case 0x06f: map_key_clear(KEY_BRIGHTNESSUP); break;
|
||||
case 0x070: map_key_clear(KEY_BRIGHTNESSDOWN); break;
|
||||
@ -911,6 +919,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
|
||||
case 0x074: map_key_clear(KEY_BRIGHTNESS_MAX); break;
|
||||
case 0x075: map_key_clear(KEY_BRIGHTNESS_AUTO); break;
|
||||
|
||||
case 0x079: map_key_clear(KEY_KBDILLUMUP); break;
|
||||
case 0x07a: map_key_clear(KEY_KBDILLUMDOWN); break;
|
||||
case 0x07c: map_key_clear(KEY_KBDILLUMTOGGLE); break;
|
||||
|
||||
case 0x082: map_key_clear(KEY_VIDEO_NEXT); break;
|
||||
case 0x083: map_key_clear(KEY_LAST); break;
|
||||
case 0x084: map_key_clear(KEY_ENTER); break;
|
||||
@ -1022,6 +1034,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
|
||||
case 0x22d: map_key_clear(KEY_ZOOMIN); break;
|
||||
case 0x22e: map_key_clear(KEY_ZOOMOUT); break;
|
||||
case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
|
||||
case 0x232: map_key_clear(KEY_FULL_SCREEN); break;
|
||||
case 0x233: map_key_clear(KEY_SCROLLUP); break;
|
||||
case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
|
||||
case 0x238: /* AC Pan */
|
||||
@ -1045,6 +1058,8 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
|
||||
case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
|
||||
case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
|
||||
|
||||
case 0x29f: map_key_clear(KEY_SCALE); break;
|
||||
|
||||
default: map_key_clear(KEY_UNKNOWN);
|
||||
}
|
||||
break;
|
||||
|
@ -1980,7 +1980,6 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
|
||||
{
|
||||
struct i3c_dev_boardinfo *boardinfo;
|
||||
struct device *dev = &master->dev;
|
||||
struct i3c_device_info info = { };
|
||||
enum i3c_addr_slot_status addrstatus;
|
||||
u32 init_dyn_addr = 0;
|
||||
|
||||
@ -2012,8 +2011,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
|
||||
|
||||
boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
|
||||
|
||||
if ((info.pid & GENMASK_ULL(63, 48)) ||
|
||||
I3C_PID_RND_LOWER_32BITS(info.pid))
|
||||
if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
|
||||
I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
|
||||
return -EINVAL;
|
||||
|
||||
boardinfo->init_dyn_addr = init_dyn_addr;
|
||||
|
@ -300,7 +300,7 @@ to_dw_i3c_master(struct i3c_master_controller *master)
|
||||
|
||||
static void dw_i3c_master_disable(struct dw_i3c_master *master)
|
||||
{
|
||||
writel(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE,
|
||||
writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
|
||||
master->regs + DEVICE_CTRL);
|
||||
}
|
||||
|
||||
|
@ -1437,6 +1437,8 @@ static int kxcjk1013_resume(struct device *dev)
|
||||
|
||||
mutex_lock(&data->mutex);
|
||||
ret = kxcjk1013_set_mode(data, OPERATION);
|
||||
if (ret == 0)
|
||||
ret = kxcjk1013_set_range(data, data->range);
|
||||
mutex_unlock(&data->mutex);
|
||||
|
||||
return ret;
|
||||
|
@ -121,6 +121,7 @@ static int ad_sd_read_reg_raw(struct ad_sigma_delta *sigma_delta,
|
||||
if (sigma_delta->info->has_registers) {
|
||||
data[0] = reg << sigma_delta->info->addr_shift;
|
||||
data[0] |= sigma_delta->info->read_mask;
|
||||
data[0] |= sigma_delta->comm;
|
||||
spi_message_add_tail(&t[0], &m);
|
||||
}
|
||||
spi_message_add_tail(&t[1], &m);
|
||||
|
@ -704,23 +704,29 @@ static int at91_adc_read_raw(struct iio_dev *idev,
|
||||
ret = wait_event_interruptible_timeout(st->wq_data_avail,
|
||||
st->done,
|
||||
msecs_to_jiffies(1000));
|
||||
if (ret == 0)
|
||||
ret = -ETIMEDOUT;
|
||||
if (ret < 0) {
|
||||
mutex_unlock(&st->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
*val = st->last_value;
|
||||
|
||||
/* Disable interrupts, regardless if adc conversion was
|
||||
* successful or not
|
||||
*/
|
||||
at91_adc_writel(st, AT91_ADC_CHDR,
|
||||
AT91_ADC_CH(chan->channel));
|
||||
at91_adc_writel(st, AT91_ADC_IDR, BIT(chan->channel));
|
||||
|
||||
st->last_value = 0;
|
||||
st->done = false;
|
||||
if (ret > 0) {
|
||||
/* a valid conversion took place */
|
||||
*val = st->last_value;
|
||||
st->last_value = 0;
|
||||
st->done = false;
|
||||
ret = IIO_VAL_INT;
|
||||
} else if (ret == 0) {
|
||||
/* conversion timeout */
|
||||
dev_err(&idev->dev, "ADC Channel %d timeout.\n",
|
||||
chan->channel);
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
|
||||
mutex_unlock(&st->lock);
|
||||
return IIO_VAL_INT;
|
||||
return ret;
|
||||
|
||||
case IIO_CHAN_INFO_SCALE:
|
||||
*val = st->vref_mv;
|
||||
|
@ -1292,6 +1292,7 @@ static int xadc_probe(struct platform_device *pdev)
|
||||
|
||||
err_free_irq:
|
||||
free_irq(xadc->irq, indio_dev);
|
||||
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
|
||||
err_clk_disable_unprepare:
|
||||
clk_disable_unprepare(xadc->clk);
|
||||
err_free_samplerate_trigger:
|
||||
@ -1321,8 +1322,8 @@ static int xadc_remove(struct platform_device *pdev)
|
||||
iio_triggered_buffer_cleanup(indio_dev);
|
||||
}
|
||||
free_irq(xadc->irq, indio_dev);
|
||||
cancel_delayed_work_sync(&xadc->zynq_unmask_work);
|
||||
clk_disable_unprepare(xadc->clk);
|
||||
cancel_delayed_work(&xadc->zynq_unmask_work);
|
||||
kfree(xadc->data);
|
||||
kfree(indio_dev->channels);
|
||||
|
||||
|
@ -64,6 +64,7 @@ config IAQCORE
|
||||
config PMS7003
|
||||
tristate "Plantower PMS7003 particulate matter sensor"
|
||||
depends on SERIAL_DEV_BUS
|
||||
select IIO_TRIGGERED_BUFFER
|
||||
help
|
||||
Say Y here to build support for the Plantower PMS7003 particulate
|
||||
matter sensor.
|
||||
@ -71,6 +72,19 @@ config PMS7003
|
||||
To compile this driver as a module, choose M here: the module will
|
||||
be called pms7003.
|
||||
|
||||
config SENSIRION_SGP30
|
||||
tristate "Sensirion SGPxx gas sensors"
|
||||
depends on I2C
|
||||
select CRC8
|
||||
help
|
||||
Say Y here to build I2C interface support for the following
|
||||
Sensirion SGP gas sensors:
|
||||
* SGP30 gas sensor
|
||||
* SGPC3 low power gas sensor
|
||||
|
||||
To compile this driver as module, choose M here: the
|
||||
module will be called sgp30.
|
||||
|
||||
config SPS30
|
||||
tristate "SPS30 particulate matter sensor"
|
||||
depends on I2C
|
||||
|
@ -2,11 +2,9 @@
|
||||
#ifndef BME680_H_
|
||||
#define BME680_H_
|
||||
|
||||
#define BME680_REG_CHIP_I2C_ID 0xD0
|
||||
#define BME680_REG_CHIP_SPI_ID 0x50
|
||||
#define BME680_REG_CHIP_ID 0xD0
|
||||
#define BME680_CHIP_ID_VAL 0x61
|
||||
#define BME680_REG_SOFT_RESET_I2C 0xE0
|
||||
#define BME680_REG_SOFT_RESET_SPI 0x60
|
||||
#define BME680_REG_SOFT_RESET 0xE0
|
||||
#define BME680_CMD_SOFTRESET 0xB6
|
||||
#define BME680_REG_STATUS 0x73
|
||||
#define BME680_SPI_MEM_PAGE_BIT BIT(4)
|
||||
|
@ -63,9 +63,23 @@ struct bme680_data {
s32 t_fine;
};

static const struct regmap_range bme680_volatile_ranges[] = {
regmap_reg_range(BME680_REG_MEAS_STAT_0, BME680_REG_GAS_R_LSB),
regmap_reg_range(BME680_REG_STATUS, BME680_REG_STATUS),
regmap_reg_range(BME680_T2_LSB_REG, BME680_GH3_REG),
};

static const struct regmap_access_table bme680_volatile_table = {
.yes_ranges = bme680_volatile_ranges,
.n_yes_ranges = ARRAY_SIZE(bme680_volatile_ranges),
};

const struct regmap_config bme680_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0xef,
.volatile_table = &bme680_volatile_table,
.cache_type = REGCACHE_RBTREE,
};
EXPORT_SYMBOL(bme680_regmap_config);

@ -316,6 +330,10 @@ static s16 bme680_compensate_temp(struct bme680_data *data,
s64 var1, var2, var3;
s16 calc_temp;

/* If the calibration is invalid, attempt to reload it */
if (!calib->par_t2)
bme680_read_calib(data, calib);

var1 = (adc_temp >> 3) - (calib->par_t1 << 1);
var2 = (var1 * calib->par_t2) >> 11;
var3 = ((var1 >> 1) * (var1 >> 1)) >> 12;
@ -583,8 +601,7 @@ static int bme680_gas_config(struct bme680_data *data)
return ret;
}

static int bme680_read_temp(struct bme680_data *data,
int *val, int *val2)
static int bme680_read_temp(struct bme680_data *data, int *val)
{
struct device *dev = regmap_get_device(data->regmap);
int ret;
@ -617,10 +634,9 @@ static int bme680_read_temp(struct bme680_data *data,
* compensate_press/compensate_humid to get compensated
* pressure/humidity readings.
*/
if (val && val2) {
*val = comp_temp;
*val2 = 100;
return IIO_VAL_FRACTIONAL;
if (val) {
*val = comp_temp * 10; /* Centidegrees to millidegrees */
return IIO_VAL_INT;
}

return ret;
@ -635,7 +651,7 @@ static int bme680_read_press(struct bme680_data *data,
s32 adc_press;

/* Read and compensate temperature to get a reading of t_fine */
ret = bme680_read_temp(data, NULL, NULL);
ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;

@ -668,7 +684,7 @@ static int bme680_read_humid(struct bme680_data *data,
u32 comp_humidity;

/* Read and compensate temperature to get a reading of t_fine */
ret = bme680_read_temp(data, NULL, NULL);
ret = bme680_read_temp(data, NULL);
if (ret < 0)
return ret;

@ -761,7 +777,7 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_TEMP:
return bme680_read_temp(data, val, val2);
return bme680_read_temp(data, val);
case IIO_PRESSURE:
return bme680_read_press(data, val, val2);
case IIO_HUMIDITYRELATIVE:
@ -867,8 +883,28 @@ int bme680_core_probe(struct device *dev, struct regmap *regmap,
{
struct iio_dev *indio_dev;
struct bme680_data *data;
unsigned int val;
int ret;

ret = regmap_write(regmap, BME680_REG_SOFT_RESET,
BME680_CMD_SOFTRESET);
if (ret < 0) {
dev_err(dev, "Failed to reset chip\n");
return ret;
}

ret = regmap_read(regmap, BME680_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(dev, "Error reading chip ID\n");
return ret;
}

if (val != BME680_CHIP_ID_VAL) {
dev_err(dev, "Wrong chip ID, got %x expected %x\n",
val, BME680_CHIP_ID_VAL);
return -ENODEV;
}

indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;

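A quick illustration, not part of the commit: per the comment in the hunk, comp_temp is in hundredths of a degree Celsius, so multiplying by 10 reports the value in millidegrees, which matches the usual IIO convention for processed temperature readings. The numbers below are made up for illustration.

#include <stdio.h>

int main(void)
{
    int comp_temp = 2573;              /* example compensated reading, 25.73 degC */
    int millideg  = comp_temp * 10;    /* 25730 millidegrees */

    printf("%d centideg = %d millideg = %.3f degC\n",
           comp_temp, millideg, millideg / 1000.0);
    return 0;
}
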
@ -23,8 +23,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
{
struct regmap *regmap;
const char *name = NULL;
unsigned int val;
int ret;

regmap = devm_regmap_init_i2c(client, &bme680_regmap_config);
if (IS_ERR(regmap)) {
@ -33,25 +31,6 @@ static int bme680_i2c_probe(struct i2c_client *client,
return PTR_ERR(regmap);
}

ret = regmap_write(regmap, BME680_REG_SOFT_RESET_I2C,
BME680_CMD_SOFTRESET);
if (ret < 0) {
dev_err(&client->dev, "Failed to reset chip\n");
return ret;
}

ret = regmap_read(regmap, BME680_REG_CHIP_I2C_ID, &val);
if (ret < 0) {
dev_err(&client->dev, "Error reading I2C chip ID\n");
return ret;
}

if (val != BME680_CHIP_ID_VAL) {
dev_err(&client->dev, "Wrong chip ID, got %x expected %x\n",
val, BME680_CHIP_ID_VAL);
return -ENODEV;
}

if (id)
name = id->name;

@ -12,28 +12,93 @@

#include "bme680.h"

struct bme680_spi_bus_context {
struct spi_device *spi;
u8 current_page;
};

/*
* In SPI mode there are only 7 address bits, a "page" register determines
* which part of the 8-bit range is active. This function looks at the address
* and writes the page selection bit if needed
*/
static int bme680_regmap_spi_select_page(
struct bme680_spi_bus_context *ctx, u8 reg)
{
struct spi_device *spi = ctx->spi;
int ret;
u8 buf[2];
u8 page = (reg & 0x80) ? 0 : 1; /* Page "1" is low range */

if (page == ctx->current_page)
return 0;

/*
* Data sheet claims we're only allowed to change bit 4, so we must do
* a read-modify-write on each and every page select
*/
buf[0] = BME680_REG_STATUS;
ret = spi_write_then_read(spi, buf, 1, buf + 1, 1);
if (ret < 0) {
dev_err(&spi->dev, "failed to set page %u\n", page);
return ret;
}

buf[0] = BME680_REG_STATUS;
if (page)
buf[1] |= BME680_SPI_MEM_PAGE_BIT;
else
buf[1] &= ~BME680_SPI_MEM_PAGE_BIT;

ret = spi_write(spi, buf, 2);
if (ret < 0) {
dev_err(&spi->dev, "failed to set page %u\n", page);
return ret;
}

ctx->current_page = page;

return 0;
}

static int bme680_regmap_spi_write(void *context, const void *data,
size_t count)
{
struct spi_device *spi = context;
struct bme680_spi_bus_context *ctx = context;
struct spi_device *spi = ctx->spi;
int ret;
u8 buf[2];

memcpy(buf, data, 2);

ret = bme680_regmap_spi_select_page(ctx, buf[0]);
if (ret)
return ret;

/*
* The SPI register address (= full register address without bit 7)
* and the write command (bit7 = RW = '0')
*/
buf[0] &= ~0x80;

return spi_write_then_read(spi, buf, 2, NULL, 0);
return spi_write(spi, buf, 2);
}

static int bme680_regmap_spi_read(void *context, const void *reg,
size_t reg_size, void *val, size_t val_size)
{
struct spi_device *spi = context;
struct bme680_spi_bus_context *ctx = context;
struct spi_device *spi = ctx->spi;
int ret;
u8 addr = *(const u8 *)reg;

return spi_write_then_read(spi, reg, reg_size, val, val_size);
ret = bme680_regmap_spi_select_page(ctx, addr);
if (ret)
return ret;

addr |= 0x80; /* bit7 = RW = '1' */

return spi_write_then_read(spi, &addr, 1, val, val_size);
}

static struct regmap_bus bme680_regmap_bus = {
@ -46,8 +111,8 @@ static struct regmap_bus bme680_regmap_bus = {
static int bme680_spi_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
struct bme680_spi_bus_context *bus_context;
struct regmap *regmap;
unsigned int val;
int ret;

spi->bits_per_word = 8;
@ -57,45 +122,21 @@ static int bme680_spi_probe(struct spi_device *spi)
return ret;
}

bus_context = devm_kzalloc(&spi->dev, sizeof(*bus_context), GFP_KERNEL);
if (!bus_context)
return -ENOMEM;

bus_context->spi = spi;
bus_context->current_page = 0xff; /* Undefined on warm boot */

regmap = devm_regmap_init(&spi->dev, &bme680_regmap_bus,
&spi->dev, &bme680_regmap_config);
bus_context, &bme680_regmap_config);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to register spi regmap %d\n",
(int)PTR_ERR(regmap));
return PTR_ERR(regmap);
}

ret = regmap_write(regmap, BME680_REG_SOFT_RESET_SPI,
BME680_CMD_SOFTRESET);
if (ret < 0) {
dev_err(&spi->dev, "Failed to reset chip\n");
return ret;
}

/* after power-on reset, Page 0(0x80-0xFF) of spi_mem_page is active */
ret = regmap_read(regmap, BME680_REG_CHIP_SPI_ID, &val);
if (ret < 0) {
dev_err(&spi->dev, "Error reading SPI chip ID\n");
return ret;
}

if (val != BME680_CHIP_ID_VAL) {
dev_err(&spi->dev, "Wrong chip ID, got %x expected %x\n",
val, BME680_CHIP_ID_VAL);
return -ENODEV;
}
/*
* select Page 1 of spi_mem_page to enable access to
* to registers from address 0x00 to 0x7F.
*/
ret = regmap_write_bits(regmap, BME680_REG_STATUS,
BME680_SPI_MEM_PAGE_BIT,
BME680_SPI_MEM_PAGE_1_VAL);
if (ret < 0) {
dev_err(&spi->dev, "failed to set page 1 of spi_mem_page\n");
return ret;
}

return bme680_core_probe(&spi->dev, regmap, id->name);
}

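A quick illustration, not part of the commit: bme680_regmap_spi_select_page() above uses bit 7 of the full register address to pick the SPI memory page, with page 0 covering 0x80-0xFF and page 1 covering 0x00-0x7F. A small sketch of that mapping, using register values taken from the header hunk; the helper name is made up.

#include <stdio.h>

static unsigned int bme680_spi_page(unsigned char reg)
{
    return (reg & 0x80) ? 0 : 1;   /* same test as the driver */
}

int main(void)
{
    unsigned char regs[] = { 0xD0 /* chip id */, 0xE0 /* soft reset */,
                             0x73 /* status */, 0x50 /* old SPI chip id alias */ };

    for (int i = 0; i < 4; i++)
        printf("reg 0x%02X -> page %u\n", regs[i], bme680_spi_page(regs[i]));
    return 0;
}
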
@ -103,9 +103,10 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
* Do not use IIO_DEGREE_TO_RAD to avoid precision
* loss. Round to the nearest integer.
*/
*val = div_s64(val64 * 314159 + 9000000ULL, 1000);
*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
ret = IIO_VAL_FRACTIONAL;
*val = 0;
*val2 = div_s64(val64 * 3141592653ULL,
180 << (CROS_EC_SENSOR_BITS - 1));
ret = IIO_VAL_INT_PLUS_NANO;
break;
case MOTIONSENSE_TYPE_MAG:
/*

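A quick illustration, not part of the commit: the new code scales pi by 1e9 (3141592653), so val2 comes out in nano-units and the pair (0, val2) is reported as IIO_VAL_INT_PLUS_NANO. The sketch below assumes CROS_EC_SENSOR_BITS == 16 and an example range value of 1000; the real numbers come from the EC headers and the sensor's reported range.

#include <stdio.h>
#include <stdint.h>

#define CROS_EC_SENSOR_BITS 16   /* assumption for illustration */

int main(void)
{
    int64_t val64 = 1000;        /* example range value from the EC */
    int64_t val2  = val64 * 3141592653LL /
                    (180 << (CROS_EC_SENSOR_BITS - 1));

    /* IIO_VAL_INT_PLUS_NANO: reported value = val + val2 * 1e-9 */
    printf("scale = 0.%09lld per LSB\n", (long long)val2);
    return 0;
}
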
@ -92,6 +92,7 @@ static ssize_t mcp4725_store_eeprom(struct device *dev,

inoutbuf[0] = 0x60; /* write EEPROM */
inoutbuf[0] |= data->ref_mode << 3;
inoutbuf[0] |= data->powerdown ? ((data->powerdown_mode + 1) << 1) : 0;
inoutbuf[1] = data->dac_value >> 4;
inoutbuf[2] = (data->dac_value & 0xf) << 4;

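A quick illustration, not part of the commit: the hunk builds a 3-byte write-EEPROM message with the command, reference and power-down bits in byte 0 (where the chip supports them), the upper 8 bits of the 12-bit code in byte 1, and the low 4 bits in the top nibble of byte 2. The field values below are made up for illustration.

#include <stdio.h>

int main(void)
{
    unsigned int ref_mode = 0, powerdown = 0, powerdown_mode = 0;
    unsigned int dac_value = 0xABC;          /* example 12-bit code */
    unsigned char buf[3];

    buf[0] = 0x60;                           /* write EEPROM command */
    buf[0] |= ref_mode << 3;
    buf[0] |= powerdown ? ((powerdown_mode + 1) << 1) : 0;
    buf[1] = dac_value >> 4;                 /* 0xAB */
    buf[2] = (dac_value & 0xf) << 4;         /* 0xC0 */

    printf("%02X %02X %02X\n", buf[0], buf[1], buf[2]);
    return 0;
}
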
@ -582,11 +582,10 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
return bmg160_get_filter(data, val);
case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
case IIO_TEMP:
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
*val = 500;
return IIO_VAL_INT;
case IIO_ANGL_VEL:
{
int i;
@ -594,6 +593,7 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
if (bmg160_scale_table[i].dps_range ==
data->dps_range) {
*val = 0;
*val2 = bmg160_scale_table[i].scale;
return IIO_VAL_INT_PLUS_MICRO;
}

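A quick illustration, not part of the commit: the temperature scale changes from the fraction 0.5 to the integer 500 because userspace computes (raw + offset) * scale and expects the result in millidegrees Celsius; each LSB of half a degree is 500 millidegrees. The input value below is made up for illustration.

#include <stdio.h>

int main(void)
{
    int raw_plus_offset = 10;     /* example value after applying the offset */

    printf("old scale 0.5: %g (a factor of 1000 too small)\n",
           raw_plus_offset * 0.5);
    printf("new scale 500: %d millidegrees = %g degC\n",
           raw_plus_offset * 500, raw_plus_offset * 500 / 1000.0);
    return 0;
}
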
@ -29,7 +29,8 @@

#include "mpu3050.h"

#define MPU3050_CHIP_ID 0x69
#define MPU3050_CHIP_ID 0x68
#define MPU3050_CHIP_ID_MASK 0x7E

/*
* Register map: anything suffixed *_H is a big-endian high byte and always
@ -1176,8 +1177,9 @@ int mpu3050_common_probe(struct device *dev,
goto err_power_down;
}

if (val != MPU3050_CHIP_ID) {
dev_err(dev, "unsupported chip id %02x\n", (u8)val);
if ((val & MPU3050_CHIP_ID_MASK) != MPU3050_CHIP_ID) {
dev_err(dev, "unsupported chip id %02x\n",
(u8)(val & MPU3050_CHIP_ID_MASK));
ret = -ENODEV;
goto err_power_down;
}

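A quick illustration, not part of the commit: masking the reported ID with 0x7E ignores bit 0 and bit 7, so values such as 0x68 and 0x69 (and their bit-7 variants) all compare equal to MPU3050_CHIP_ID == 0x68, while other IDs are still rejected.

#include <stdio.h>

#define MPU3050_CHIP_ID      0x68
#define MPU3050_CHIP_ID_MASK 0x7E

int main(void)
{
    unsigned char ids[] = { 0x68, 0x69, 0xE8, 0x6A };

    for (int i = 0; i < 4; i++)
        printf("id 0x%02X -> %s\n", ids[i],
               (ids[i] & MPU3050_CHIP_ID_MASK) == MPU3050_CHIP_ID ?
               "accepted" : "rejected");
    return 0;
}
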
@ -320,9 +320,8 @@ static int iio_scan_mask_set(struct iio_dev *indio_dev,
const unsigned long *mask;
unsigned long *trialmask;

trialmask = kmalloc_array(BITS_TO_LONGS(indio_dev->masklength),
sizeof(*trialmask),
GFP_KERNEL);
trialmask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
sizeof(*trialmask), GFP_KERNEL);
if (trialmask == NULL)
return -ENOMEM;
if (!indio_dev->masklength) {

@ -1743,10 +1743,10 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
mutex_lock(&indio_dev->info_exist_lock);

cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);

mutex_lock(&indio_dev->info_exist_lock);

iio_device_unregister_debugfs(indio_dev);

iio_disable_all_buffers(indio_dev);

@ -993,6 +993,8 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
* will only be one mm, so no big deal.
*/
down_write(&mm->mmap_sem);
if (!mmget_still_valid(mm))
goto skip_mm;
mutex_lock(&ufile->umap_lock);
list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
list) {
@ -1007,6 +1009,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
}
mutex_unlock(&ufile->umap_lock);
skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
}

@ -148,6 +148,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}

pdata->input = input;
platform_set_drvdata(pdev, pdata);

error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
@ -163,9 +166,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}

pdata->input = input;
platform_set_drvdata(pdev, pdata);

device_init_wakeup(&pdev->dev, pdata->wakeup);

return 0;

@ -1339,21 +1339,46 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0600", 0 },
{ "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0603", 0 },
{ "ELAN0604", 0 },
{ "ELAN0605", 0 },
{ "ELAN0606", 0 },
{ "ELAN0607", 0 },
{ "ELAN0608", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
{ "ELAN060F", 0 },
{ "ELAN0610", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0615", 0 },
{ "ELAN0616", 0 },
{ "ELAN0617", 0 },
{ "ELAN0618", 0 },
{ "ELAN0619", 0 },
{ "ELAN061A", 0 },
{ "ELAN061B", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN061E", 0 },
{ "ELAN061F", 0 },
{ "ELAN0620", 0 },
{ "ELAN0621", 0 },
{ "ELAN0622", 0 },
{ "ELAN0623", 0 },
{ "ELAN0624", 0 },
{ "ELAN0625", 0 },
{ "ELAN0626", 0 },
{ "ELAN0627", 0 },
{ "ELAN0628", 0 },
{ "ELAN0629", 0 },
{ "ELAN062A", 0 },
{ "ELAN062B", 0 },
{ "ELAN062C", 0 },
{ "ELAN062D", 0 },
{ "ELAN0631", 0 },
{ "ELAN0632", 0 },
{ "ELAN1000", 0 },
{ }
};

@ -22,6 +22,15 @@
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4

#define ATH79_MISC_IRQ_COUNT 32
#define ATH79_MISC_PERF_IRQ 5

static int ath79_perfcount_irq;

int get_c0_perfcount_int(void)
{
return ath79_perfcount_irq;
}
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);

static void ath79_misc_irq_handler(struct irq_desc *desc)
{
@ -113,6 +122,8 @@ static void __init ath79_misc_intc_domain_init(
{
void __iomem *base = domain->host_data;

ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);

/* Disable and clear all interrupts */
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);

@ -1184,6 +1184,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *sess;
struct device *dev = &pdev->dev;
int i, sessions = 0;
int rc;

cctx = dev_get_drvdata(dev->parent);
if (!cctx)
@ -1213,7 +1214,11 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
}
cctx->sesscount++;
spin_unlock(&cctx->lock);
dma_set_mask(dev, DMA_BIT_MASK(32));
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "32-bit DMA enable failed\n");
return rc;
}

return 0;
}

@ -1688,12 +1688,11 @@ static void goya_init_golden_registers(struct hl_device *hdev)

/*
* Workaround for H2 #HW-23 bug
* Set DMA max outstanding read requests to 240 on DMA CH 1. Set it
* to 16 on KMD DMA
* We need to limit only these DMAs because the user can only read
* Set DMA max outstanding read requests to 240 on DMA CH 1.
* This limitation is still large enough to not affect Gen4 bandwidth.
* We need to only limit that DMA channel because the user can only read
* from Host using DMA CH 1
*/
WREG32(mmDMA_CH_0_CFG0, 0x0fff0010);
WREG32(mmDMA_CH_1_CFG0, 0x0fff00F0);

goya->hw_cap_initialized |= HW_CAP_GOLDEN;
@ -3693,7 +3692,7 @@ static int goya_validate_dma_pkt_mmu(struct hl_device *hdev,
* WA for HW-23.
* We can't allow user to read from Host using QMANs other than 1.
*/
if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
if (parser->hw_queue_id != GOYA_QUEUE_ID_DMA_1 &&
hl_mem_area_inside_range(le64_to_cpu(user_dma_pkt->src_addr),
le32_to_cpu(user_dma_pkt->tsize),
hdev->asic_prop.va_space_host_start_address,

@ -1721,7 +1721,7 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
adapter->soft_stats.scc += smb->tx_1_col;
adapter->soft_stats.mcc += smb->tx_2_col;
adapter->soft_stats.latecol += smb->tx_late_col;
adapter->soft_stats.tx_underun += smb->tx_underrun;
adapter->soft_stats.tx_underrun += smb->tx_underrun;
adapter->soft_stats.tx_trunc += smb->tx_trunc;
adapter->soft_stats.tx_pause += smb->tx_pause;

@ -3179,7 +3179,7 @@ static struct atl1_stats atl1_gstrings_stats[] = {
{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
{"tx_underrun", ATL1_STAT(soft_stats.tx_underrun)},
{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},

@ -681,7 +681,7 @@ struct atl1_sft_stats {
u64 scc; /* packets TX after a single collision */
u64 mcc; /* packets TX after multiple collisions */
u64 latecol; /* TX packets w/ late collisions */
u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
u64 tx_underrun; /* TX packets aborted due to TX FIFO underrun
* or TRD FIFO underrun */
u64 tx_trunc; /* TX packets truncated due to size > MTU */
u64 rx_pause; /* num Pause packets received. */

@ -553,7 +553,7 @@ static void atl2_intr_tx(struct atl2_adapter *adapter)
netdev->stats.tx_aborted_errors++;
if (txs->late_col)
netdev->stats.tx_window_errors++;
if (txs->underun)
if (txs->underrun)
netdev->stats.tx_fifo_errors++;
} while (1);

@ -260,7 +260,7 @@ struct tx_pkt_status {
unsigned multi_col:1;
unsigned late_col:1;
unsigned abort_col:1;
unsigned underun:1; /* current packet is aborted
unsigned underrun:1; /* current packet is aborted
* due to txram underrun */
unsigned:3; /* reserved */
unsigned update:1; /* always 1'b1 in tx_status_buf */

@ -33,6 +33,26 @@
#include <linux/bpf_trace.h>
#include "en/xdp.h"

int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
{
int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;

/* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
* The condition checked in mlx5e_rx_is_linear_skb is:
* SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1)
* (Note that hw_mtu == sw_mtu + hard_mtu.)
* What is returned from this function is:
* max_mtu = PAGE_SIZE - S - hr - hard_mtu (2)
* After assigning sw_mtu := max_mtu, the left side of (1) turns to
* SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
* because both PAGE_SIZE and S are already aligned. Any number greater
* than max_mtu would make the left side of (1) greater than PAGE_SIZE,
* so max_mtu is the maximum MTU allowed.
*/

return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
struct xdp_buff *xdp)
@ -310,9 +330,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
mlx5e_xdpi_fifo_pop(xdpi_fifo);

if (is_redirect) {
xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, true);
@ -351,9 +371,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
mlx5e_xdpi_fifo_pop(xdpi_fifo);

if (is_redirect) {
xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi.di, false);

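A quick illustration, not part of the commit: plugging example numbers into formula (2) from the mlx5e_xdp_max_mtu() comment. Every constant below is an assumption for a typical x86-64 build (PAGE_SIZE 4096, NET_IP_ALIGN 0, XDP_PACKET_HEADROOM 256, the aligned skb_shared_info size taken as 320, and 18 bytes of hard_mtu overhead); the real values come from the kernel headers and from params->hard_mtu.

#include <stdio.h>

int main(void)
{
    int page_size = 4096;
    int hr        = 0 + 256;   /* NET_IP_ALIGN + XDP_PACKET_HEADROOM */
    int shinfo    = 320;       /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */
    int hard_mtu  = 18;        /* assumed L2 overhead */

    printf("max_mtu = %d\n", page_size - shinfo - hr - hard_mtu);
    return 0;
}
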
Some files were not shown because too many files have changed in this diff.