Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

The ungrafting-from-PRIO bug fixes in net merge cleanly into net-next but produce a build failure. The resolution used here is from Petr Machata.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -251,11 +251,11 @@ selectively from different subsystems.

 .. code-block:: c

     struct kcov_remote_arg {
-        unsigned        trace_mode;
-        unsigned        area_size;
-        unsigned        num_handles;
-        uint64_t        common_handle;
-        uint64_t        handles[0];
+        __u32           trace_mode;
+        __u32           area_size;
+        __u32           num_handles;
+        __aligned_u64   common_handle;
+        __aligned_u64   handles[0];
     };

     #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
@@ -111,7 +111,7 @@ patternProperties:
|
|||||||
spi-rx-bus-width:
|
spi-rx-bus-width:
|
||||||
allOf:
|
allOf:
|
||||||
- $ref: /schemas/types.yaml#/definitions/uint32
|
- $ref: /schemas/types.yaml#/definitions/uint32
|
||||||
- enum: [ 1, 2, 4 ]
|
- enum: [ 1, 2, 4, 8 ]
|
||||||
- default: 1
|
- default: 1
|
||||||
description:
|
description:
|
||||||
Bus width to the SPI bus used for MISO.
|
Bus width to the SPI bus used for MISO.
|
||||||
@@ -123,7 +123,7 @@ patternProperties:
|
|||||||
spi-tx-bus-width:
|
spi-tx-bus-width:
|
||||||
allOf:
|
allOf:
|
||||||
- $ref: /schemas/types.yaml#/definitions/uint32
|
- $ref: /schemas/types.yaml#/definitions/uint32
|
||||||
- enum: [ 1, 2, 4 ]
|
- enum: [ 1, 2, 4, 8 ]
|
||||||
- default: 1
|
- default: 1
|
||||||
description:
|
description:
|
||||||
Bus width to the SPI bus used for MOSI.
|
Bus width to the SPI bus used for MOSI.
|
||||||
|
|||||||
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
@@ -607,7 +607,7 @@ tcp_synack_retries - INTEGER
 	with the current initial RTO of 1second. With this the final timeout
 	for a passive TCP connection will happen after 63seconds.

-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
 	Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
 	Send out syncookies when the syn backlog queue of a socket
 	overflows. This is to prevent against the common 'SYN flood attack'
@@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the
 mainline tree from Linus, and ``net-next`` is where the new code goes
 for the future release. You can find the trees here:

-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git

 Q: How often do changes from these trees make it to the mainline Linus tree?
 ----------------------------------------------------------------------------
@@ -60,6 +60,7 @@ lack of a better place.
    volatile-considered-harmful
    botching-up-ioctls
    clang-format
+   ../riscv/patch-acceptance

 .. only:: subproject and html

@@ -7,6 +7,7 @@ RISC-V architecture

    boot-image-header
    pmu
+   patch-acceptance

 .. only:: subproject and html

Documentation/riscv/patch-acceptance.rst (new file, 35 lines)
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations.  New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts.  This flexibility can present a
+challenge for RISC-V Linux maintenance.  Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code.  We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation.  (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions.  These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation.  To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
MAINTAINERS (14 lines changed)
@@ -11460,8 +11460,8 @@ M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 S:	Odd Fixes
 F:	Documentation/devicetree/bindings/net/
 F:	drivers/net/

@@ -11502,8 +11502,8 @@ M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 B:	mailto:netdev@vger.kernel.org
 S:	Maintained
 F:	net/

@@ -11548,7 +11548,7 @@ M:	"David S. Miller" <davem@davemloft.net>
 M:	Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:	Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 S:	Maintained
 F:	net/ipv4/
 F:	net/ipv6/

@@ -13686,7 +13686,6 @@ F:	drivers/net/ethernet/qualcomm/emac/

 QUALCOMM ETHQOS ETHERNET DRIVER
 M:	Vinod Koul <vkoul@kernel.org>
-M:	Niklas Cassel <niklas.cassel@linaro.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c

@@ -14128,6 +14127,7 @@ M:	Paul Walmsley <paul.walmsley@sifive.com>
 M:	Palmer Dabbelt <palmer@dabbelt.com>
 M:	Albert Ou <aou@eecs.berkeley.edu>
 L:	linux-riscv@lists.infradead.org
+P:	Documentation/riscv/patch-acceptance.rst
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
 S:	Supported
 F:	arch/riscv/

@@ -14555,8 +14555,6 @@ F:	include/linux/platform_data/spi-s3c64xx.h

 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
-M:	Girish K S <ks.giri@samsung.com>
-M:	Vipul Pandya <vipul.pandya@samsung.com>
 S:	Supported
 L:	netdev@vger.kernel.org
 F:	drivers/net/ethernet/samsung/sxgbe/
Makefile (2 lines changed)
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus

 # *DOCUMENTATION*
@@ -162,7 +162,7 @@
 #endif

 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	ST2	r58, r59, PT_sp + 12
+	ST2	r58, r59, PT_r58
 #endif

 .endm

@@ -172,8 +172,8 @@

 	LD2	gp, fp, PT_r26		; gp (r26), fp (r27)

-	ld	r12, [sp, PT_sp + 4]
-	ld	r30, [sp, PT_sp + 8]
+	ld	r12, [sp, PT_r12]
+	ld	r30, [sp, PT_r30]

 	; Restore SP (into AUX_USER_SP) only if returning to U mode
 	;  - for K mode, it will be implicitly restored as stack is unwound

@@ -190,7 +190,7 @@
 #endif

 #ifdef CONFIG_ARC_HAS_ACCL_REGS
-	LD2	r58, r59, PT_sp + 12
+	LD2	r58, r59, PT_r58
 #endif
 .endm

@@ -8,7 +8,6 @@
 #define _ASM_ARC_HUGEPAGE_H

 #include <linux/types.h>
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>

 static inline pte_t pmd_pte(pmd_t pmd)
@@ -66,7 +66,15 @@ int main(void)

 	DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
 	DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
-	DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
+
+#ifdef CONFIG_ISA_ARCV2
+	OFFSET(PT_r12, pt_regs, r12);
+	OFFSET(PT_r30, pt_regs, r30);
+#endif
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+	OFFSET(PT_r58, pt_regs, r58);
+	OFFSET(PT_r59, pt_regs, r59);
+#endif

 	return 0;
 }
@@ -7,7 +7,7 @@
 menuconfig ARC_PLAT_EZNPS
 	bool "\"EZchip\" ARC dev platform"
 	select CPU_BIG_ENDIAN
-	select CLKSRC_NPS
+	select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
 	select EZNPS_GIC
 	select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
 	help
@@ -85,13 +85,12 @@
 #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
-#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)

 #define __P000  PAGE_NONE
 #define __P001  PAGE_READONLY
 #define __P010  PAGE_READONLY
 #define __P011  PAGE_READONLY
-#define __P100  PAGE_EXECONLY
+#define __P100  PAGE_READONLY_EXEC
 #define __P101  PAGE_READONLY_EXEC
 #define __P110  PAGE_READONLY_EXEC
 #define __P111  PAGE_READONLY_EXEC

@@ -100,7 +99,7 @@
 #define __S001  PAGE_READONLY
 #define __S010  PAGE_SHARED
 #define __S011  PAGE_SHARED
-#define __S100  PAGE_EXECONLY
+#define __S100  PAGE_READONLY_EXEC
 #define __S101  PAGE_READONLY_EXEC
 #define __S110  PAGE_SHARED_EXEC
 #define __S111  PAGE_SHARED_EXEC
@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

 #define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
-/*
- * Execute-only user mappings do not have the PTE_USER bit set. All valid
- * kernel mappings have the PTE_UXN bit set.
- */
 #define pte_valid_not_user(pte) \
-	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
+	((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
 #define pte_valid_user(pte) \

@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

 /*
  * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check) other than user execute-only which do not have the
- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
+ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
+ * set.
  */
 #define pte_access_permitted(pte, write) \
 	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
@@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
 	vm_fault_t fault, major = 0;
-	unsigned long vm_flags = VM_READ | VM_WRITE;
+	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
 	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

 	if (kprobe_page_fault(regs, esr))
@@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

 	/*
 	 * FIXME: Cleanup page tables (also in arch_add_memory() in case

@@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 	 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
 	 * unlocked yet.
 	 */
-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v)			\
 	"1:	%0 = memw_locked(%1);\n"				\
 	"	%0 = "#op "(%0,%2);\n"					\
 	"	memw_locked(%1,P3)=%0;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output)						\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\

@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
 	"1:	%0 = memw_locked(%1);\n"				\
 	"	%0 = "#op "(%0,%2);\n"					\
 	"	memw_locked(%1,P3)=%0;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output)						\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\

@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)		\
 	"1:	%0 = memw_locked(%2);\n"				\
 	"	%1 = "#op "(%0,%3);\n"					\
 	"	memw_locked(%2,P3)=%1;\n"				\
-	"	if !P3 jump 1b;\n"					\
+	"	if (!P3) jump 1b;\n"					\
 	: "=&r" (output), "=&r" (val)					\
 	: "r" (&v->counter), "r" (i)					\
 	: "memory", "p3"						\

@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	"	}"
 	"	memw_locked(%2, p3) = %1;"
 	"	{"
-	"		if !p3 jump 1b;"
+	"		if (!p3) jump 1b;"
 	"	}"
 	"2:"
 	: "=&r" (__oldval), "=&r" (tmp)
@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"

@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"

@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 	"1:	R12 = memw_locked(R10);\n"
 	"	{ P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
 	"	memw_locked(R10,P1) = R12;\n"
-	"	{if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
+	"	{if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
 	: "=&r" (oldval)
 	: "r" (addr), "r" (nr)
 	: "r10", "r11", "r12", "p0", "p1", "memory"

@@ -223,7 +223,7 @@ static inline int ffs(int x)
 	int r;

 	asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
-	    "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
+	    "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
	    : "=&r" (r)
	    : "r" (x)
	    : "p0");
@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 	__asm__ __volatile__ (
 	"1:	%0 = memw_locked(%1);\n"    /*  load into retval */
 	"	memw_locked(%1,P0) = %2;\n" /*  store into memory */
-	"	if !P0 jump 1b;\n"
+	"	if (!P0) jump 1b;\n"
 	: "=&r" (retval)
 	: "r" (ptr), "r" (x)
 	: "memory", "p0"
@@ -16,7 +16,7 @@
 	/* For example: %1 = %4 */ \
 	insn \
 	"2: memw_locked(%3,p2) = %1;\n" \
-	"   if !p2 jump 1b;\n" \
+	"   if (!p2) jump 1b;\n" \
 	"   %1 = #0;\n" \
 	"3:\n" \
 	".section .fixup,\"ax\"\n" \

@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	"1: %1 = memw_locked(%3)\n"
 	"   {\n"
 	"      p2 = cmp.eq(%1,%4)\n"
-	"      if !p2.new jump:NT 3f\n"
+	"      if (!p2.new) jump:NT 3f\n"
 	"   }\n"
 	"2: memw_locked(%3,p2) = %5\n"
-	"   if !p2 jump 1b\n"
+	"   if (!p2) jump 1b\n"
 	"3:\n"
 	".section .fixup,\"ax\"\n"
 	"4: %0 = #%6\n"
@@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)

 void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
 #define ioremap_nocache ioremap
+#define ioremap_uc(X, Y) ioremap((X), (Y))


 #define __raw_writel writel
@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	{ P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"

@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
 	"1:	R6 = memw_locked(%0);\n"
 	"	R6 = add(R6,#-1);\n"
 	"	memw_locked(%0,P3) = R6\n"
-	"	if !P3 jump 1b;\n"
+	"	if (!P3) jump 1b;\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"

@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	{ %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	{ %0 = P3 }\n"
 	"1:\n"

@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0)\n"
 	"	{ P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"

@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1)\n"
 	"	{ %0 = #0; P3 = cmp.eq(R6,#0);  R6 = #-1;}\n"
-	"	{ if !P3 jump 1f; }\n"
+	"	{ if (!P3) jump 1f; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"

@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"1:	R6 = memw_locked(%0);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1b; R6 = #1; }\n"
+	"	{ if (!P3) jump 1b; R6 = #1; }\n"
 	"	memw_locked(%0,P3) = R6;\n"
-	"	{ if !P3 jump 1b; }\n"
+	"	{ if (!P3) jump 1b; }\n"
 	:
 	: "r" (&lock->lock)
 	: "memory", "r6", "p3"

@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 	"	R6 = memw_locked(%1);\n"
 	"	P3 = cmp.eq(R6,#0);\n"
-	"	{ if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
+	"	{ if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
 	"	memw_locked(%1,P3) = R6;\n"
 	"	%0 = P3;\n"
 	"1:\n"
@@ -11,8 +11,6 @@
 #include <linux/thread_info.h>
 #include <linux/module.h>

-register unsigned long current_frame_pointer asm("r30");
-
 struct stackframe {
 	unsigned long fp;
 	unsigned long rets;

@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)

 	low = (unsigned long)task_stack_page(current);
 	high = low + THREAD_SIZE;
-	fp = current_frame_pointer;
+	fp = (unsigned long)__builtin_frame_address(0);

 	while (fp >= low && fp <= (high - sizeof(*frame))) {
 		frame = (struct stackframe *)fp;
@@ -369,7 +369,7 @@ ret_from_fork:
 		R26.L = #LO(do_work_pending);
 		R0 = #VM_INT_DISABLE;
 	}
-	if P0 jump check_work_pending
+	if (P0) jump check_work_pending
 	{
 		R0 = R25;
 		callr R24
@@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
@@ -47,7 +47,7 @@ config MIPS
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
 	select HAVE_ASM_MODVERSIONS
-	select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
+	select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
 	select HAVE_CONTEXT_TRACKING
 	select HAVE_COPY_THREAD_TLS
 	select HAVE_C_RECORDMCOUNT
@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
 	-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
 	-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)

+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o

@@ -15,7 +15,8 @@
 static inline int __pure __get_cpu_type(const int cpu_type)
 {
 	switch (cpu_type) {
-#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
+#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
+    defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
 	case CPU_LOONGSON2EF:
 #endif

@@ -49,8 +49,26 @@ struct thread_info {
 	.addr_limit	= KERNEL_DS,		\
 }

-/* How to get the thread information struct from C. */
+/*
+ * A pointer to the struct thread_info for the currently executing thread is
+ * held in register $28/$gp.
+ *
+ * We declare __current_thread_info as a global register variable rather than a
+ * local register variable within current_thread_info() because clang doesn't
+ * support explicit local register variables.
+ *
+ * When building the VDSO we take care not to declare the global register
+ * variable because this causes GCC to not preserve the value of $28/$gp in
+ * functions that change its value (which is common in the PIC VDSO when
+ * accessing the GOT). Since the VDSO shouldn't be accessing
+ * __current_thread_info anyway we declare it extern in order to cause a link
+ * failure if it's referenced.
+ */
+#ifdef __VDSO__
+extern struct thread_info *__current_thread_info;
+#else
 register struct thread_info *__current_thread_info __asm__("$28");
+#endif

 static inline struct thread_info *current_thread_info(void)
 {
@@ -26,8 +26,6 @@

 #define __VDSO_USE_SYSCALL		ULLONG_MAX

-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
 static __always_inline long gettimeofday_fallback(
 	struct __kernel_old_timeval *_tv,
 	struct timezone *_tz)

@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
 	return error ? -ret : ret;
 }

-#else
-
-static __always_inline long gettimeofday_fallback(
-	struct __kernel_old_timeval *_tv,
-	struct timezone *_tz)
-{
-	return -1;
-}
-
-#endif
-
 static __always_inline long clock_gettime_fallback(
 	clockid_t _clkid,
 	struct __kernel_timespec *_ts)
@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
 	return 0;
 }

+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+
+	for_each_possible_cpu(cpu1)
+		if (cpus_are_siblings(cpu, cpu1))
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+	int cpu1;
+	int cluster = cpu_cluster(&cpu_data[cpu]);
+
+	for_each_possible_cpu(cpu1)
+		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+			cpumask_set_cpu(cpu1, cpu_map);
+}
+
 static int __populate_cache_leaves(unsigned int cpu)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;

@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;

 	if (c->icache.waysize) {
+		/* L1 caches are per core */
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
+		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
 	} else {
 		populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
 	}

-	if (c->scache.waysize)
+	if (c->scache.waysize) {
+		/* L2 cache is per cluster */
+		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
 		populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
+	}

 	if (c->tcache.waysize)
 		populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
@@ -1804,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	unsigned int image_size;
 	u8 *image_ptr;

-	if (!prog->jit_requested || MIPS_ISA_REV < 2)
+	if (!prog->jit_requested)
 		return prog;

 	tmp = bpf_jit_blind_constants(prog);
@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
 	return __cvdso_clock_gettime32(clock, ts);
 }

+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
 			struct timezone *tz)
 {
 	return __cvdso_gettimeofday(tv, tz);
 }

+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
 			struct old_timespec32 *res)
 {

@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
 	return __cvdso_clock_gettime(clock, ts);
 }

+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+
+/*
+ * This is behind the ifdef so that we don't provide the symbol when there's no
+ * possibility of there being a usable clocksource, because there's nothing we
+ * can do without it. When libc fails the symbol lookup it should fall back on
+ * the standard syscall path.
+ */
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
 			struct timezone *tz)
 {
 	return __cvdso_gettimeofday(tv, tz);
 }

+#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
+
 int __vdso_clock_getres(clockid_t clock_id,
 			struct __kernel_timespec *res)
 {
@@ -15,6 +15,7 @@
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
+#include <linux/jump_label.h>
 #include <linux/irqflags.h>
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
 	int ret;

-	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);

 	/* Remove htab bolted mappings for this section of memory */
 	start = (unsigned long)__va(start);
@@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {

 #endif

-static inline bool slice_addr_is_low(unsigned long addr)
+static inline notrace bool slice_addr_is_low(unsigned long addr)
 {
 	u64 tmp = (u64)addr;

@@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
 			mm_ctx_user_psize(&current->mm->context), 1);
 }

-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
+unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned char *psizes;
 	int index, mask_index;
@@ -64,6 +64,7 @@ config RISCV
 	select SPARSEMEM_STATIC if 32BIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+	select ARCH_HAS_GCOV_PROFILE_ALL

 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT
@@ -54,6 +54,7 @@
 			reg = <1>;
 			riscv,isa = "rv64imafdc";
 			tlb-split;
+			next-level-cache = <&l2cache>;
 			cpu1_intc: interrupt-controller {
 				#interrupt-cells = <1>;
 				compatible = "riscv,cpu-intc";

@@ -77,6 +78,7 @@
 			reg = <2>;
 			riscv,isa = "rv64imafdc";
 			tlb-split;
+			next-level-cache = <&l2cache>;
 			cpu2_intc: interrupt-controller {
 				#interrupt-cells = <1>;
 				compatible = "riscv,cpu-intc";

@@ -100,6 +102,7 @@
 			reg = <3>;
 			riscv,isa = "rv64imafdc";
 			tlb-split;
+			next-level-cache = <&l2cache>;
 			cpu3_intc: interrupt-controller {
 				#interrupt-cells = <1>;
 				compatible = "riscv,cpu-intc";

@@ -123,6 +126,7 @@
 			reg = <4>;
 			riscv,isa = "rv64imafdc";
 			tlb-split;
+			next-level-cache = <&l2cache>;
 			cpu4_intc: interrupt-controller {
 				#interrupt-cells = <1>;
 				compatible = "riscv,cpu-intc";

@@ -253,6 +257,17 @@
 			#pwm-cells = <3>;
 			status = "disabled";
 		};
+		l2cache: cache-controller@2010000 {
+			compatible = "sifive,fu540-c000-ccache", "cache";
+			cache-block-size = <64>;
+			cache-level = <2>;
+			cache-sets = <1024>;
+			cache-size = <2097152>;
+			cache-unified;
+			interrupt-parent = <&plic0>;
+			interrupts = <1 2 3>;
+			reg = <0x0 0x2010000 0x0 0x1000>;
+		};

 	};
 };
@@ -116,9 +116,9 @@
 # define SR_PIE		SR_MPIE
 # define SR_PP		SR_MPP

-# define IRQ_SOFT	IRQ_M_SOFT
-# define IRQ_TIMER	IRQ_M_TIMER
-# define IRQ_EXT	IRQ_M_EXT
+# define RV_IRQ_SOFT	IRQ_M_SOFT
+# define RV_IRQ_TIMER	IRQ_M_TIMER
+# define RV_IRQ_EXT	IRQ_M_EXT
 #else /* CONFIG_RISCV_M_MODE */
 # define CSR_STATUS	CSR_SSTATUS
 # define CSR_IE		CSR_SIE

@@ -133,15 +133,15 @@
 # define SR_PIE		SR_SPIE
 # define SR_PP		SR_SPP

-# define IRQ_SOFT	IRQ_S_SOFT
-# define IRQ_TIMER	IRQ_S_TIMER
-# define IRQ_EXT	IRQ_S_EXT
+# define RV_IRQ_SOFT	IRQ_S_SOFT
+# define RV_IRQ_TIMER	IRQ_S_TIMER
+# define RV_IRQ_EXT	IRQ_S_EXT
 #endif /* CONFIG_RISCV_M_MODE */

 /* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
-#define IE_SIE		(_AC(0x1, UL) << IRQ_SOFT)
-#define IE_TIE		(_AC(0x1, UL) << IRQ_TIMER)
-#define IE_EIE		(_AC(0x1, UL) << IRQ_EXT)
+#define IE_SIE		(_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE		(_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE		(_AC(0x1, UL) << RV_IRQ_EXT)

 #ifndef __ASSEMBLY__

@@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	 */
 	old = *parent;

-	if (function_graph_enter(old, self_addr, frame_pointer, parent))
+	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
 		*parent = return_hooker;
 }

@@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)

 	irq_enter();
 	switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-	case IRQ_TIMER:
+	case RV_IRQ_TIMER:
 		riscv_timer_interrupt();
 		break;
 #ifdef CONFIG_SMP
-	case IRQ_SOFT:
+	case RV_IRQ_SOFT:
 		/*
 		 * We only use software interrupts to pass IPIs, so if a non-SMP
 		 * system gets one, then we don't know what to do.

@@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 		riscv_software_interrupt();
 		break;
 #endif
-	case IRQ_EXT:
+	case RV_IRQ_EXT:
 		handle_arch_irq(regs);
 		break;
 	default:
@@ -99,13 +99,13 @@ static void __init setup_initrd(void)
 		pr_info("initrd not found or empty");
 		goto disable;
 	}
-	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+	if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
 		pr_err("initrd extends beyond end of memory");
 		goto disable;
 	}

 	size = initrd_end - initrd_start;
-	memblock_reserve(__pa(initrd_start), size);
+	memblock_reserve(__pa_symbol(initrd_start), size);
 	initrd_below_start_ok = 1;

 	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",

@@ -124,8 +124,8 @@ void __init setup_bootmem(void)
 {
 	struct memblock_region *reg;
 	phys_addr_t mem_size = 0;
-	phys_addr_t vmlinux_end = __pa(&_end);
-	phys_addr_t vmlinux_start = __pa(&_start);
+	phys_addr_t vmlinux_end = __pa_symbol(&_end);
+	phys_addr_t vmlinux_start = __pa_symbol(&_start);

 	/* Find the memory region containing the kernel */
 	for_each_memblock(memory, reg) {

@@ -445,7 +445,7 @@ static void __init setup_vm_final(void)

 	/* Setup swapper PGD for fixmap */
 	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
-			   __pa(fixmap_pgd_next),
+			   __pa_symbol(fixmap_pgd_next),
 			   PGDIR_SIZE, PAGE_TABLE);

 	/* Map all memory banks */

@@ -474,7 +474,7 @@ static void __init setup_vm_final(void)
 	clear_fixmap(FIX_PMD);

 	/* Move to swapper page table */
-	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
 	local_flush_tlb_all();
 }
 #else
@@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	vmem_remove_mapping(start, size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
@@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct zone *zone;

-	zone = page_zone(pfn_to_page(start_pfn));
-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif

@@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
-	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
-	struct zone *zone = page_zone(page);

-	__remove_pages(zone, start_pfn, nr_pages, altmap);
+	__remove_pages(start_pfn, nr_pages, altmap);
 	kernel_physical_mapping_remove(start, start + size);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
block/bio.c (39 lines added)
@@ -538,6 +538,45 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);

+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned int done = 0;
+	bool truncated = false;
+
+	if (new_size >= bio->bi_iter.bi_size)
+		return;
+
+	if (bio_data_dir(bio) != READ)
+		goto exit;
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (done + bv.bv_len > new_size) {
+			unsigned offset;
+
+			if (!truncated)
+				offset = new_size - done;
+			else
+				offset = 0;
+			zero_user(bv.bv_page, offset, bv.bv_len - offset);
+			truncated = true;
+		}
+		done += bv.bv_len;
+	}
+
+ exit:
+	/*
+	 * Don't touch bvec table here and make it really immutable, since
+	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
+	 * in its .end_bio() callback.
+	 *
+	 * It is enough to truncate bio by updating .bi_size since we can make
+	 * correct bvec with the updated .bi_size for drivers.
+	 */
+	bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
@@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
 	return sectors & (lbs - 1);
 }

-static unsigned get_max_segment_size(const struct request_queue *q,
-				      unsigned offset)
+static inline unsigned get_max_segment_size(const struct request_queue *q,
+					     struct page *start_page,
+					     unsigned long offset)
 {
 	unsigned long mask = queue_segment_boundary(q);

-	/* default segment boundary mask means no boundary limit */
-	if (mask == BLK_SEG_BOUNDARY_MASK)
-		return queue_max_segment_size(q);
-
-	return min_t(unsigned long, mask - (mask & offset) + 1,
+	offset = mask & (page_to_phys(start_page) + offset);
+	return min_t(unsigned long, mask - offset + 1,
 		     queue_max_segment_size(q));
 }

@@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
 	unsigned seg_size = 0;

 	while (len && *nsegs < max_segs) {
-		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
+		seg_size = get_max_segment_size(q, bv->bv_page,
+						bv->bv_offset + total_len);
 		seg_size = min(seg_size, len);

 		(*nsegs)++;

@@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,

 	while (nbytes > 0) {
 		unsigned offset = bvec->bv_offset + total;
-		unsigned len = min(get_max_segment_size(q, offset), nbytes);
+		unsigned len = min(get_max_segment_size(q, bvec->bv_page,
+					offset), nbytes);
 		struct page *page = bvec->bv_page;

 		/*
@@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
 			here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
 			dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
 			    << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-			j++;
+			dma[j++] = 0;
 		}
 		here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
 		if (!eff) size += skip;
@@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
 	if (size != eff) {
 		dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
 		    (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
-		j++;
+		dma[j++] = 0;
 	}
 	if (!j || j > 2*RX_DMA_BUF) {
 		printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
@@ -186,6 +186,9 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 		if (zone->cond == BLK_ZONE_COND_FULL)
 			return BLK_STS_IOERR;

+		if (zone->wp == zone->start)
+			zone->cond = BLK_ZONE_COND_EMPTY;
+		else
 			zone->cond = BLK_ZONE_COND_CLOSED;
 		break;
 	case REQ_OP_ZONE_FINISH:
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
 	unsigned int cdev = 0;
 	u32 mnistat, tnistat, tstatus, mcmd;
 	u16 tnicmd, mnicmd;
-	u8 mcapndx;
 	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
 	u32 step, rem, rem_isoch, rem_async;
 	int ret = 0;
@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
 		cur = list_entry(pos, struct agp_3_5_dev, list);
 		dev = cur->dev;

-		mcapndx = cur->capndx;
-
 		pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);

 		master[cdev].maxbw = (mnistat >> 16) & 0xff;
@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
 		cur = master[cdev].dev;
 		dev = cur->dev;

-		mcapndx = cur->capndx;
-
 		master[cdev].rq += (cdev == ndevs - 1)
 			? (rem_async + rem_isoch) : step;

@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
 {
 	struct pci_dev *td = bridge->dev, *dev = NULL;
 	u8 mcapndx;
-	u32 isoch, arqsz;
+	u32 isoch;
 	u32 tstatus, mstatus, ncapid;
 	u32 mmajor;
 	u16 mpstat;
@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
 	if (isoch == 0) /* isoch xfers not available, bail out. */
 		return -ENODEV;

-	arqsz = (tstatus >> 13) & 0x7;
-
 	/*
 	 * Allocate a head for our AGP 3.5 device list
 	 * (multiple AGP v3 devices are allowed behind a single bridge).
@@ -130,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
 		priv->response_read = true;

 		ret_size = min_t(ssize_t, size, priv->response_length);
-		if (!ret_size) {
+		if (ret_size <= 0) {
 			priv->response_length = 0;
 			goto out;
 		}
@@ -14,7 +14,7 @@ struct file_priv {
 	struct work_struct timeout_work;
 	struct work_struct async_work;
 	wait_queue_head_t async_wait;
-	size_t response_length;
+	ssize_t response_length;
 	bool response_read;
 	bool command_enqueued;

@@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,

 	if (wait_startup(chip, 0) != 0) {
 		rc = -ENODEV;
-		goto err_start;
+		goto out_err;
 	}

 	/* Take control of the TPM's interrupt hardware and shut it off */
 	rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
 	if (rc < 0)
-		goto err_start;
+		goto out_err;

 	intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
 		   TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;
@@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,

 	rc = tpm_chip_start(chip);
 	if (rc)
-		goto err_start;
+		goto out_err;

 	rc = tpm2_probe(chip);
+	tpm_chip_stop(chip);
 	if (rc)
-		goto err_probe;
+		goto out_err;

 	rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;

 	priv->manufacturer_id = vendor;

 	rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;

 	dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
 		 (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",
@@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	probe = probe_itpm(chip);
 	if (probe < 0) {
 		rc = -ENODEV;
-		goto err_probe;
+		goto out_err;
 	}

 	/* Figure out the capabilities */
 	rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
 	if (rc < 0)
-		goto err_probe;
+		goto out_err;

 	dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
 		intfcaps);
@@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 	if (tpm_get_timeouts(chip)) {
 		dev_err(dev, "Could not get TPM timeouts and durations\n");
 		rc = -ENODEV;
-		goto err_probe;
+		goto out_err;
 	}

-	chip->flags |= TPM_CHIP_FLAG_IRQ;
 	if (irq) {
 		tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
 					 irq);
@@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 		}
 	}

-	tpm_chip_stop(chip);
-
 	rc = tpm_chip_register(chip);
 	if (rc)
-		goto err_start;
+		goto out_err;
+
+	if (chip->ops->clk_enable != NULL)
+		chip->ops->clk_enable(chip, false);

 	return 0;

-err_probe:
-	tpm_chip_stop(chip);
-
-err_start:
+out_err:
 	if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
 		chip->ops->clk_enable(chip, false);

@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
 	return get_cycles64();
 }

-static u64 riscv_sched_clock(void)
+static u64 notrace riscv_sched_clock(void)
 {
 	return get_cycles64();
 }
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
 static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
 	.nb_channels = 6,
 	.transfer_ord_max = 5,
-	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
+		 JZ_SOC_DATA_BREAK_LINKS,
 };

 static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)

 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
 						 SZ_2M, &descs->hw, flags);
-		if (!descs->virt && (i > 0)) {
+		if (!descs->virt) {
 			int idx;

 			for (idx = 0; idx < i; idx++) {
+				descs = &ioat_chan->descs[idx];
 				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
 						  descs->virt, descs->hw);
 				descs->virt = NULL;
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
 		c = p->vchan;
 		if (c && (tc1 & BIT(i))) {
 			spin_lock_irqsave(&c->vc.lock, flags);
+			if (p->ds_run != NULL) {
 				vchan_cookie_complete(&p->ds_run->vd);
 				p->ds_done = p->ds_run;
 				p->ds_run = NULL;
+			}
 			spin_unlock_irqrestore(&c->vc.lock, flags);
 		}
 		if (c && (tc2 & BIT(i))) {
@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
 	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
 		return -EAGAIN;

+	/* Avoid losing track of ds_run if a transaction is in flight */
+	if (c->phy->ds_run)
+		return -EAGAIN;
+
 	if (vd) {
 		struct k3_dma_desc_sw *ds =
 			container_of(vd, struct k3_dma_desc_sw, vd);
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
 		dmaengine_desc_get_callback(&vd->tx, &cb);

 		list_del(&vd->node);
-		vchan_vdesc_fini(vd);

 		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
+		vchan_vdesc_fini(vd);
 	}
 }

@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
 	fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
 				    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 	if (IS_ERR(fw_shm_pool)) {
-		tee_client_close_context(pvt_data.ctx);
 		dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
 		err = PTR_ERR(fw_shm_pool);
 		goto out_sess;
@@ -613,7 +613,17 @@ static bool amdgpu_atpx_detect(void)
 	bool d3_supported = false;
 	struct pci_dev *parent_pdev;

-	while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) {
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+
+		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+
+		parent_pdev = pci_upstream_bridge(pdev);
+		d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+		amdgpu_atpx_get_quirks(pdev);
+	}
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
 		vga_count++;

 		has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
@@ -1488,7 +1488,7 @@ out:

 	/* Start rlc autoload after psp recieved all the gfx firmware */
 	if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
-	    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+	    AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
 		ret = psp_rlc_autoload(psp);
 		if (ret) {
 			DRM_ERROR("Failed to start rlc autoload\n");
@@ -292,10 +292,10 @@ enum AMDGPU_UCODE_ID {
 	AMDGPU_UCODE_ID_CP_MEC2_JT,
 	AMDGPU_UCODE_ID_CP_MES,
 	AMDGPU_UCODE_ID_CP_MES_DATA,
-	AMDGPU_UCODE_ID_RLC_G,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
 	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+	AMDGPU_UCODE_ID_RLC_G,
 	AMDGPU_UCODE_ID_STORAGE,
 	AMDGPU_UCODE_ID_SMC,
 	AMDGPU_UCODE_ID_UVD,
@@ -1052,17 +1052,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		break;
 	case CHIP_RAVEN:
-		/* Disable GFXOFF on original raven. There are combinations
-		 * of sbios and platforms that are not stable.
-		 */
-		if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
-			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
-		else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
-			 &&((adev->gfx.rlc_fw_version != 106 &&
-			     adev->gfx.rlc_fw_version < 531) ||
-			    (adev->gfx.rlc_fw_version == 53815) ||
-			    (adev->gfx.rlc_feature_version < 1) ||
-			    !adev->gfx.rlc.is_rlc_v2_1))
+		if (!(adev->rev_id >= 0x8 ||
+		      adev->pdev->device == 0x15d8) &&
+		    (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
+		     !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
 			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;

 		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
@@ -866,6 +866,7 @@ static int smu_sw_init(void *handle)
 	smu->smu_baco.platform_support = false;

 	mutex_init(&smu->sensor_lock);
+	mutex_init(&smu->metrics_lock);

 	smu->watermarks_bitmap = 0;
 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -862,18 +862,21 @@ static int arcturus_get_metrics_table(struct smu_context *smu,
 	struct smu_table_context *smu_table= &smu->smu_table;
 	int ret = 0;

+	mutex_lock(&smu->metrics_lock);
 	if (!smu_table->metrics_time ||
 	     time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
 		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 				(void *)smu_table->metrics_table, false);
 		if (ret) {
 			pr_info("Failed to export SMU metrics table!\n");
+			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}

 	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+	mutex_unlock(&smu->metrics_lock);

 	return ret;
 }
@@ -349,6 +349,7 @@ struct smu_context
 	const struct pptable_funcs *ppt_funcs;
 	struct mutex mutex;
 	struct mutex sensor_lock;
+	struct mutex metrics_lock;
 	uint64_t pool_size;

 	struct smu_table_context smu_table;
@@ -562,17 +562,20 @@ static int navi10_get_metrics_table(struct smu_context *smu,
 	struct smu_table_context *smu_table= &smu->smu_table;
 	int ret = 0;

+	mutex_lock(&smu->metrics_lock);
 	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
 		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 				(void *)smu_table->metrics_table, false);
 		if (ret) {
 			pr_info("Failed to export SMU metrics table!\n");
+			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}

 	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+	mutex_unlock(&smu->metrics_lock);

 	return ret;
 }
@@ -1678,17 +1678,20 @@ static int vega20_get_metrics_table(struct smu_context *smu,
 	struct smu_table_context *smu_table= &smu->smu_table;
 	int ret = 0;

+	mutex_lock(&smu->metrics_lock);
 	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
 		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 				(void *)smu_table->metrics_table, false);
 		if (ret) {
 			pr_info("Failed to export SMU metrics table!\n");
+			mutex_unlock(&smu->metrics_lock);
 			return ret;
 		}
 		smu_table->metrics_time = jiffies;
 	}

 	memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+	mutex_unlock(&smu->metrics_lock);

 	return ret;
 }
@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }

-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
 	.get_modes = malidp_mw_connector_get_modes,
 	.mode_valid = malidp_mw_connector_mode_valid,
 };
@@ -215,11 +215,12 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_ddp_comp *comp;
 	int i, count = 0;
+	unsigned int local_index = plane - mtk_crtc->planes;

 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		comp = mtk_crtc->ddp_comp[i];
-		if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) {
-			*local_layer = plane->index - count;
+		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+			*local_layer = local_index - count;
 			return comp;
 		}
 		count += mtk_ddp_comp_layer_nr(comp);
@@ -310,7 +311,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)

 		plane_state = to_mtk_plane_state(plane->state);
 		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
-		mtk_ddp_comp_layer_config(comp, local_layer, plane_state);
+		if (comp)
+			mtk_ddp_comp_layer_config(comp, local_layer,
+						  plane_state);
 	}

 	return 0;
@@ -386,6 +389,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
 							  &local_layer);

+			if (comp)
 				mtk_ddp_comp_layer_config(comp, local_layer,
 							  plane_state);
 			plane_state->pending.config = false;
@@ -401,7 +405,9 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 	struct mtk_ddp_comp *comp;

 	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+	if (comp)
 		return mtk_ddp_comp_layer_check(comp, local_layer, state);
+	return 0;
 }

 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -230,28 +230,25 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
 static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
 {
 	u32 timcon0, timcon1, timcon2, timcon3;
-	u32 ui, cycle_time;
+	u32 data_rate_mhz = DIV_ROUND_UP(dsi->data_rate, 1000000);
 	struct mtk_phy_timing *timing = &dsi->phy_timing;

-	ui = DIV_ROUND_UP(1000000000, dsi->data_rate);
-	cycle_time = div_u64(8000000000ULL, dsi->data_rate);
+	timing->lpx = (60 * data_rate_mhz / (8 * 1000)) + 1;
+	timing->da_hs_prepare = (80 * data_rate_mhz + 4 * 1000) / 8000;
+	timing->da_hs_zero = (170 * data_rate_mhz + 10 * 1000) / 8000 + 1 -
+			     timing->da_hs_prepare;
+	timing->da_hs_trail = timing->da_hs_prepare + 1;

-	timing->lpx = NS_TO_CYCLE(60, cycle_time);
-	timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time);
-	timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time);
-	timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time);
+	timing->ta_go = 4 * timing->lpx - 2;
+	timing->ta_sure = timing->lpx + 2;
+	timing->ta_get = 4 * timing->lpx;
+	timing->da_hs_exit = 2 * timing->lpx + 1;

-	timing->ta_go = 4 * timing->lpx;
-	timing->ta_sure = 3 * timing->lpx / 2;
-	timing->ta_get = 5 * timing->lpx;
-	timing->da_hs_exit = 2 * timing->lpx;
-
-	timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time);
-	timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10;
-
-	timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time);
-	timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time);
-	timing->clk_hs_exit = 2 * timing->lpx;
+	timing->clk_hs_prepare = 70 * data_rate_mhz / (8 * 1000);
+	timing->clk_hs_post = timing->clk_hs_prepare + 8;
+	timing->clk_hs_trail = timing->clk_hs_prepare;
+	timing->clk_hs_zero = timing->clk_hs_trail * 4;
+	timing->clk_hs_exit = 2 * timing->clk_hs_trail;

 	timcon0 = timing->lpx | timing->da_hs_prepare << 8 |
 		  timing->da_hs_zero << 16 | timing->da_hs_trail << 24;
@@ -482,27 +479,39 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 			 dsi_tmp_buf_bpp - 10);

 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-			  timing->da_hs_zero + timing->da_hs_exit + 2;
+			  timing->da_hs_zero + timing->da_hs_exit + 3;

 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-		if (vm->hfront_porch * dsi_tmp_buf_bpp >
+		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
 		    data_phy_cycles * dsi->lanes + 18) {
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp -
-						     data_phy_cycles *
-						     dsi->lanes - 18;
+			horizontal_frontporch_byte =
+				vm->hfront_porch * dsi_tmp_buf_bpp -
+				(data_phy_cycles * dsi->lanes + 18) *
+				vm->hfront_porch /
+				(vm->hfront_porch + vm->hback_porch);
+
+			horizontal_backporch_byte =
+				horizontal_backporch_byte -
+				(data_phy_cycles * dsi->lanes + 18) *
+				vm->hback_porch /
+				(vm->hfront_porch + vm->hback_porch);
 		} else {
 			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
 			horizontal_frontporch_byte = vm->hfront_porch *
 						     dsi_tmp_buf_bpp;
 		}
 	} else {
-		if (vm->hfront_porch * dsi_tmp_buf_bpp >
+		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
 		    data_phy_cycles * dsi->lanes + 12) {
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp -
-						     data_phy_cycles *
-						     dsi->lanes - 12;
+			horizontal_frontporch_byte =
+				vm->hfront_porch * dsi_tmp_buf_bpp -
+				(data_phy_cycles * dsi->lanes + 12) *
+				vm->hfront_porch /
+				(vm->hfront_porch + vm->hback_porch);
+			horizontal_backporch_byte = horizontal_backporch_byte -
+				(data_phy_cycles * dsi->lanes + 12) *
+				vm->hback_porch /
+				(vm->hfront_porch + vm->hback_porch);
 		} else {
 			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
 			horizontal_frontporch_byte = vm->hfront_porch *
@@ -685,8 +685,6 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
 	struct sun4i_hdmi *hdmi = dev_get_drvdata(dev);

 	cec_unregister_adapter(hdmi->cec_adap);
-	drm_connector_cleanup(&hdmi->connector);
-	drm_encoder_cleanup(&hdmi->encoder);
 	i2c_del_adapter(hdmi->i2c);
 	i2c_put_adapter(hdmi->ddc_i2c);
 	clk_disable_unprepare(hdmi->mod_clk);
@@ -261,7 +261,8 @@ static int asus_event(struct hid_device *hdev, struct hid_field *field,
 		      struct hid_usage *usage, __s32 value)
 {
 	if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
-	    (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+	    (usage->hid & HID_USAGE) != 0x00 &&
+	    (usage->hid & HID_USAGE) != 0xff && !usage->type) {
 		hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
 			 usage->hid & HID_USAGE);
 	}
@@ -288,6 +288,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
 	offset = report->size;
 	report->size += parser->global.report_size * parser->global.report_count;

+	/* Total size check: Allow for possible report index byte */
+	if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
+		hid_err(parser->device, "report is too long\n");
+		return -1;
+	}
+
 	if (!parser->local.usage_index) /* Ignore padding fields */
 		return 0;

@@ -631,6 +631,7 @@
 #define USB_VENDOR_ID_ITE 0x048d
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
+#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
 #define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
 #define USB_DEVICE_ID_ITE8595 0x8595

@@ -730,6 +731,7 @@
 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
 #define USB_DEVICE_ID_LG_MELFAS_MT 0x6007
 #define I2C_DEVICE_ID_LG_8001 0x8001
+#define I2C_DEVICE_ID_LG_7010 0x7010

 #define USB_VENDOR_ID_LOGITECH 0x046d
 #define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
@@ -1102,6 +1104,7 @@
 #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10
 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7

@@ -1132,9 +1132,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 	}

 mapped:
-	if (device->driver->input_mapped && device->driver->input_mapped(device,
-				hidinput, field, usage, &bit, &max) < 0)
-		goto ignore;
+	if (device->driver->input_mapped &&
+	    device->driver->input_mapped(device, hidinput, field, usage,
+					 &bit, &max) < 0) {
+		/*
+		 * The driver indicated that no further generic handling
+		 * of the usage is desired.
+		 */
+		return;
+	}

 	set_bit(usage->type, input->evbit);

@@ -1215,9 +1221,11 @@ mapped:
 		set_bit(MSC_SCAN, input->mscbit);
 	}

-ignore:
 	return;

+ignore:
+	usage->type = 0;
+	usage->code = 0;
 }

 static void hidinput_handle_scroll(struct hid_usage *usage,
@@ -40,6 +40,9 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id ite_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
+	/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
+	{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
+			 USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, ite_devices);
@@ -1019,7 +1019,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
 		tool = MT_TOOL_DIAL;
 	else if (unlikely(!confidence_state)) {
 		tool = MT_TOOL_PALM;
-		if (!active &&
+		if (!active && mt &&
 		    input_mt_is_active(&mt->slots[slotnum])) {
 			/*
 			 * The non-confidence was reported for
@@ -1985,6 +1985,9 @@ static const struct hid_device_id mt_devices[] = {
 	{ .driver_data = MT_CLS_LG,
 		HID_USB_DEVICE(USB_VENDOR_ID_LG,
 			USB_DEVICE_ID_LG_MELFAS_MT) },
+	{ .driver_data = MT_CLS_LG,
+		HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+			USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },

 	/* MosArt panels */
 	{ .driver_data = MT_CLS_CONFIDENCE_MINUS_ONE,
@@ -174,6 +174,7 @@ static const struct hid_device_id hid_quirks[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },

 	{ 0 }
 };
@@ -768,8 +768,12 @@ static int steam_probe(struct hid_device *hdev,

 	if (steam->quirks & STEAM_QUIRK_WIRELESS) {
 		hid_info(hdev, "Steam wireless receiver connected");
+		/* If using a wireless adaptor ask for connection status */
+		steam->connected = false;
 		steam_request_conn_status(steam);
 	} else {
+		/* A wired connection is always present */
+		steam->connected = true;
 		ret = steam_register(steam);
 		if (ret) {
 			hid_err(hdev,
@@ -252,10 +252,10 @@ static __poll_t hidraw_poll(struct file *file, poll_table *wait)

 	poll_wait(file, &list->hidraw->wait, wait);
 	if (list->head != list->tail)
-		return EPOLLIN | EPOLLRDNORM | EPOLLOUT;
+		return EPOLLIN | EPOLLRDNORM;
 	if (!list->hidraw->exist)
 		return EPOLLERR | EPOLLHUP;
-	return 0;
+	return EPOLLOUT | EPOLLWRNORM;
 }

 static int hidraw_open(struct inode *inode, struct file *file)
@@ -49,6 +49,8 @@
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
 #define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
 #define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
+#define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(6)
+

 /* flags */
 #define I2C_HID_STARTED		0
@@ -175,6 +177,8 @@ static const struct i2c_hid_quirks {
 		I2C_HID_QUIRK_BOGUS_IRQ },
 	{ USB_VENDOR_ID_ALPS_JP, HID_ANY_ID,
 		I2C_HID_QUIRK_RESET_ON_RESUME },
+	{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+		I2C_HID_QUIRK_BAD_INPUT_SIZE },
 	{ 0, 0 }
 };

@@ -496,10 +500,16 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
 	}

 	if ((ret_size > size) || (ret_size < 2)) {
+		if (ihid->quirks & I2C_HID_QUIRK_BAD_INPUT_SIZE) {
+			ihid->inbuf[0] = size & 0xff;
+			ihid->inbuf[1] = size >> 8;
+			ret_size = size;
+		} else {
 			dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
 				__func__, size, ret_size);
 			return;
 		}
+	}

 	i2c_hid_dbg(ihid, "input: %*ph\n", ret_size, ihid->inbuf);

@@ -24,7 +24,9 @@
 #define ICL_MOBILE_DEVICE_ID 0x34FC
 #define SPT_H_DEVICE_ID 0xA135
 #define CML_LP_DEVICE_ID 0x02FC
+#define CMP_H_DEVICE_ID 0x06FC
 #define EHL_Ax_DEVICE_ID 0x4BB3
+#define TGL_LP_DEVICE_ID 0xA0FC

 #define REVISION_ID_CHT_A0 0x6
 #define REVISION_ID_CHT_Ax_SI 0x0
@@ -34,7 +34,9 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CMP_H_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)},
 	{0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -772,7 +772,7 @@ static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
 	if (uhid->head != uhid->tail)
 		return EPOLLIN | EPOLLRDNORM;

-	return 0;
+	return EPOLLOUT | EPOLLWRNORM;
 }

 static const struct file_operations uhid_fops = {
@@ -241,12 +241,51 @@ static int hiddev_release(struct inode * inode, struct file * file)
 	return 0;
 }

+static int __hiddev_open(struct hiddev *hiddev, struct file *file)
+{
+	struct hiddev_list *list;
+	int error;
+
+	lockdep_assert_held(&hiddev->existancelock);
+
+	list = vzalloc(sizeof(*list));
+	if (!list)
+		return -ENOMEM;
+
+	mutex_init(&list->thread_lock);
+	list->hiddev = hiddev;
+
+	if (!hiddev->open++) {
+		error = hid_hw_power(hiddev->hid, PM_HINT_FULLON);
+		if (error < 0)
+			goto err_drop_count;
+
+		error = hid_hw_open(hiddev->hid);
+		if (error < 0)
+			goto err_normal_power;
+	}
+
+	spin_lock_irq(&hiddev->list_lock);
+	list_add_tail(&list->node, &hiddev->list);
+	spin_unlock_irq(&hiddev->list_lock);
+
+	file->private_data = list;
+
+	return 0;
+
+err_normal_power:
+	hid_hw_power(hiddev->hid, PM_HINT_NORMAL);
+err_drop_count:
+	hiddev->open--;
+	vfree(list);
+	return error;
+}
+
 /*
  * open file op
  */
 static int hiddev_open(struct inode *inode, struct file *file)
 {
-	struct hiddev_list *list;
 	struct usb_interface *intf;
 	struct hid_device *hid;
 	struct hiddev *hiddev;
@@ -255,66 +294,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
 	intf = usbhid_find_interface(iminor(inode));
 	if (!intf)
 		return -ENODEV;

 	hid = usb_get_intfdata(intf);
 	hiddev = hid->hiddev;

-	if (!(list = vzalloc(sizeof(struct hiddev_list))))
-		return -ENOMEM;
-	mutex_init(&list->thread_lock);
-	list->hiddev = hiddev;
-	file->private_data = list;
-
-	/*
-	 * no need for locking because the USB major number
-	 * is shared which usbcore guards against disconnect
-	 */
-	if (list->hiddev->exist) {
-		if (!list->hiddev->open++) {
-			res = hid_hw_open(hiddev->hid);
-			if (res < 0)
-				goto bail;
-		}
-	} else {
-		res = -ENODEV;
-		goto bail;
-	}
-
-	spin_lock_irq(&list->hiddev->list_lock);
-	list_add_tail(&list->node, &hiddev->list);
-	spin_unlock_irq(&list->hiddev->list_lock);
-
 	mutex_lock(&hiddev->existancelock);
-	/*
-	 * recheck exist with existance lock held to
-	 * avoid opening a disconnected device
-	 */
-	if (!list->hiddev->exist) {
-		res = -ENODEV;
-		goto bail_unlock;
-	}
-	if (!list->hiddev->open++)
-		if (list->hiddev->exist) {
-			struct hid_device *hid = hiddev->hid;
-			res = hid_hw_power(hid, PM_HINT_FULLON);
-			if (res < 0)
-				goto bail_unlock;
-			res = hid_hw_open(hid);
-			if (res < 0)
-				goto bail_normal_power;
-		}
-	mutex_unlock(&hiddev->existancelock);
-	return 0;
-bail_normal_power:
-	hid_hw_power(hid, PM_HINT_NORMAL);
-bail_unlock:
+	res = hiddev->exist ? __hiddev_open(hiddev, file) : -ENODEV;
 	mutex_unlock(&hiddev->existancelock);

-	spin_lock_irq(&list->hiddev->list_lock);
-	list_del(&list->node);
-	spin_unlock_irq(&list->hiddev->list_lock);
-bail:
-	file->private_data = NULL;
-	vfree(list);
 	return res;
 }

@@ -2096,14 +2096,16 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
 	    (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
 	     hdev->product == 0x357 || hdev->product == 0x358 || /* Intuos Pro 2 */
 	     hdev->product == 0x392 || /* Intuos Pro 2 */
-	     hdev->product == 0x398 || hdev->product == 0x399)) { /* MobileStudio Pro */
+	     hdev->product == 0x398 || hdev->product == 0x399 || /* MobileStudio Pro */
+	     hdev->product == 0x3AA)) { /* MobileStudio Pro */
 		value = (field->logical_maximum - value);

 		if (hdev->product == 0x357 || hdev->product == 0x358 ||
 		    hdev->product == 0x392)
 			value = wacom_offset_rotation(input, usage, value, 3, 16);
 		else if (hdev->product == 0x34d || hdev->product == 0x34e ||
-			 hdev->product == 0x398 || hdev->product == 0x399)
+			 hdev->product == 0x398 || hdev->product == 0x399 ||
+			 hdev->product == 0x3AA)
 			value = wacom_offset_rotation(input, usage, value, 1, 2);
 	}
 	else {
@@ -256,7 +256,7 @@ static int __init plic_init(struct device_node *node,
 		 * Skip contexts other than external interrupts for our
 		 * privilege level.
 		 */
-		if (parent.args[0] != IRQ_EXT)
+		if (parent.args[0] != RV_IRQ_EXT)
 			continue;

 		hartid = plic_find_hart_id(parent.np);
@@ -380,6 +380,7 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status)
 	} else {
 		list_del_init(&data->list);
 		if (!(data->msg.tx_status & CEC_TX_STATUS_OK))
+			if (!WARN_ON(!data->adap->transmit_queue_sz))
 				data->adap->transmit_queue_sz--;
 	}

@@ -432,6 +433,14 @@ static void cec_flush(struct cec_adapter *adap)
 		 * need to do anything special in that case.
 		 */
 	}
+	/*
+	 * If something went wrong and this counter isn't what it should
+	 * be, then this will reset it back to 0. Warn if it is not 0,
+	 * since it indicates a bug, either in this framework or in a
+	 * CEC driver.
+	 */
+	if (WARN_ON(adap->transmit_queue_sz))
+		adap->transmit_queue_sz = 0;
 }

 /*
@@ -456,7 +465,7 @@ int cec_thread_func(void *_adap)
 		bool timeout = false;
 		u8 attempts;

-		if (adap->transmitting) {
+		if (adap->transmit_in_progress) {
 			int err;

 			/*
@@ -491,7 +500,7 @@ int cec_thread_func(void *_adap)
 			goto unlock;
 		}

-		if (adap->transmitting && timeout) {
+		if (adap->transmit_in_progress && timeout) {
 			/*
 			 * If we timeout, then log that. Normally this does
 			 * not happen and it is an indication of a faulty CEC
@@ -500,14 +509,18 @@ int cec_thread_func(void *_adap)
 			 * so much traffic on the bus that the adapter was
 			 * unable to transmit for CEC_XFER_TIMEOUT_MS (2.1s).
 			 */
+			if (adap->transmitting) {
 				pr_warn("cec-%s: message %*ph timed out\n", adap->name,
 					adap->transmitting->msg.len,
 					adap->transmitting->msg.msg);
-			adap->transmit_in_progress = false;
-			adap->tx_timeouts++;
 				/* Just give up on this. */
 				cec_data_cancel(adap->transmitting,
 						CEC_TX_STATUS_TIMEOUT);
+			} else {
+				pr_warn("cec-%s: transmit timed out\n", adap->name);
+			}
+			adap->transmit_in_progress = false;
+			adap->tx_timeouts++;
 			goto unlock;
 		}

@@ -522,6 +535,7 @@ int cec_thread_func(void *_adap)
 		data = list_first_entry(&adap->transmit_queue,
 					struct cec_data, list);
 		list_del_init(&data->list);
+		if (!WARN_ON(!data->adap->transmit_queue_sz))
 			adap->transmit_queue_sz--;

 		/* Make this the current transmitting message */
@@ -1085,11 +1099,11 @@ void cec_received_msg_ts(struct cec_adapter *adap,
 			valid_la = false;
 		else if (!cec_msg_is_broadcast(msg) && !(dir_fl & DIRECTED))
 			valid_la = false;
-		else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST1_4))
+		else if (cec_msg_is_broadcast(msg) && !(dir_fl & BCAST))
 			valid_la = false;
 		else if (cec_msg_is_broadcast(msg) &&
-			 adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0 &&
-			 !(dir_fl & BCAST2_0))
+			 adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0 &&
+			 !(dir_fl & BCAST1_4))
 			valid_la = false;
 	}
 	if (valid_la && min_len) {
@@ -116,6 +116,7 @@ struct pulse8 {
|
|||||||
unsigned int vers;
|
unsigned int vers;
|
||||||
struct completion cmd_done;
|
struct completion cmd_done;
|
||||||
struct work_struct work;
|
struct work_struct work;
|
||||||
|
u8 work_result;
|
||||||
struct delayed_work ping_eeprom_work;
|
struct delayed_work ping_eeprom_work;
|
||||||
struct cec_msg rx_msg;
|
struct cec_msg rx_msg;
|
||||||
u8 data[DATA_SIZE];
|
u8 data[DATA_SIZE];
|
||||||
@@ -137,8 +138,10 @@ static void pulse8_irq_work_handler(struct work_struct *work)
|
|||||||
{
|
{
|
||||||
struct pulse8 *pulse8 =
|
struct pulse8 *pulse8 =
|
||||||
container_of(work, struct pulse8, work);
|
container_of(work, struct pulse8, work);
|
||||||
|
u8 result = pulse8->work_result;
|
||||||
|
|
||||||
switch (pulse8->data[0] & 0x3f) {
|
pulse8->work_result = 0;
|
||||||
|
switch (result & 0x3f) {
|
||||||
case MSGCODE_FRAME_DATA:
|
case MSGCODE_FRAME_DATA:
|
||||||
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
|
cec_received_msg(pulse8->adap, &pulse8->rx_msg);
|
||||||
break;
|
break;
|
||||||
@@ -172,12 +175,12 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
 		pulse8->escape = false;
 	} else if (data == MSGEND) {
 		struct cec_msg *msg = &pulse8->rx_msg;
+		u8 msgcode = pulse8->buf[0];
 
 		if (debug)
 			dev_info(pulse8->dev, "received: %*ph\n",
 				 pulse8->idx, pulse8->buf);
-		pulse8->data[0] = pulse8->buf[0];
-		switch (pulse8->buf[0] & 0x3f) {
+		switch (msgcode & 0x3f) {
 		case MSGCODE_FRAME_START:
 			msg->len = 1;
 			msg->msg[0] = pulse8->buf[1];
@@ -186,14 +189,20 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
 			if (msg->len == CEC_MAX_MSG_SIZE)
 				break;
 			msg->msg[msg->len++] = pulse8->buf[1];
-			if (pulse8->buf[0] & MSGCODE_FRAME_EOM)
+			if (msgcode & MSGCODE_FRAME_EOM) {
+				WARN_ON(pulse8->work_result);
+				pulse8->work_result = msgcode;
 				schedule_work(&pulse8->work);
+				break;
+			}
 			break;
 		case MSGCODE_TRANSMIT_SUCCEEDED:
 		case MSGCODE_TRANSMIT_FAILED_LINE:
 		case MSGCODE_TRANSMIT_FAILED_ACK:
 		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
 		case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
+			WARN_ON(pulse8->work_result);
+			pulse8->work_result = msgcode;
 			schedule_work(&pulse8->work);
 			break;
 		case MSGCODE_HIGH_ERROR:
@@ -102,6 +102,7 @@
 #define TCAN4X5X_MODE_NORMAL BIT(7)
 
 #define TCAN4X5X_DISABLE_WAKE_MSK	(BIT(31) | BIT(30))
+#define TCAN4X5X_DISABLE_INH_MSK	BIT(9)
 
 #define TCAN4X5X_SW_RESET BIT(2)
 
@@ -166,6 +167,28 @@ static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
 	}
 }
 
+static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
+{
+	int ret = 0;
+
+	if (priv->reset_gpio) {
+		gpiod_set_value(priv->reset_gpio, 1);
+
+		/* tpulse_width minimum 30us */
+		usleep_range(30, 100);
+		gpiod_set_value(priv->reset_gpio, 0);
+	} else {
+		ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
+				   TCAN4X5X_SW_RESET);
+		if (ret)
+			return ret;
+	}
+
+	usleep_range(700, 1000);
+
+	return ret;
+}
+
 static int regmap_spi_gather_write(void *context, const void *reg,
 				   size_t reg_len, const void *val,
 				   size_t val_len)
@@ -348,14 +371,23 @@ static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
 				  TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
 }
 
+static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
+{
+	struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+
+	return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
+				  TCAN4X5X_DISABLE_INH_MSK, 0x01);
+}
+
 static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 {
 	struct tcan4x5x_priv *tcan4x5x = cdev->device_data;
+	int ret;
 
 	tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
 						    GPIOD_OUT_HIGH);
 	if (IS_ERR(tcan4x5x->device_wake_gpio)) {
-		if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
+		if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 
 		tcan4x5x_disable_wake(cdev);
@@ -366,18 +398,17 @@ static int tcan4x5x_parse_config(struct m_can_classdev *cdev)
 	if (IS_ERR(tcan4x5x->reset_gpio))
 		tcan4x5x->reset_gpio = NULL;
 
-	usleep_range(700, 1000);
+	ret = tcan4x5x_reset(tcan4x5x);
+	if (ret)
+		return ret;
 
 	tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
 							      "device-state",
 							      GPIOD_IN);
-	if (IS_ERR(tcan4x5x->device_state_gpio))
+	if (IS_ERR(tcan4x5x->device_state_gpio)) {
 		tcan4x5x->device_state_gpio = NULL;
-
-	tcan4x5x->power = devm_regulator_get_optional(cdev->dev,
-						      "vsup");
-	if (PTR_ERR(tcan4x5x->power) == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
+		tcan4x5x_disable_state(cdev);
+	}
 
 	return 0;
 }
@@ -412,6 +443,12 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	if (!priv)
 		return -ENOMEM;
 
+	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
+	if (PTR_ERR(priv->power) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	else
+		priv->power = NULL;
+
 	mcan_class->device_data = priv;
 
 	m_can_class_get_clocks(mcan_class);
@@ -451,11 +488,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 	priv->regmap = devm_regmap_init(&spi->dev, &tcan4x5x_bus,
 					&spi->dev, &tcan4x5x_regmap);
 
-	ret = tcan4x5x_parse_config(mcan_class);
+	ret = tcan4x5x_power_enable(priv->power, 1);
 	if (ret)
 		goto out_clk;
 
-	tcan4x5x_power_enable(priv->power, 1);
+	ret = tcan4x5x_parse_config(mcan_class);
+	if (ret)
+		goto out_power;
+
+	ret = tcan4x5x_init(mcan_class);
+	if (ret)
+		goto out_power;
 
 	ret = m_can_class_register(mcan_class);
 	if (ret)
@@ -381,13 +381,12 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
 	struct net_device *dev = napi->dev;
 	struct mscan_regs __iomem *regs = priv->reg_base;
 	struct net_device_stats *stats = &dev->stats;
-	int npackets = 0;
-	int ret = 1;
+	int work_done = 0;
 	struct sk_buff *skb;
 	struct can_frame *frame;
 	u8 canrflg;
 
-	while (npackets < quota) {
+	while (work_done < quota) {
 		canrflg = in_8(&regs->canrflg);
 		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
 			break;
@@ -408,18 +407,18 @@ static int mscan_rx_poll(struct napi_struct *napi, int quota)
 
 		stats->rx_packets++;
 		stats->rx_bytes += frame->can_dlc;
-		npackets++;
+		work_done++;
 		netif_receive_skb(skb);
 	}
 
-	if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
-		napi_complete(&priv->napi);
-		clear_bit(F_RX_PROGRESS, &priv->flags);
-		if (priv->can.state < CAN_STATE_BUS_OFF)
-			out_8(&regs->canrier, priv->shadow_canrier);
-		ret = 0;
+	if (work_done < quota) {
+		if (likely(napi_complete_done(&priv->napi, work_done))) {
+			clear_bit(F_RX_PROGRESS, &priv->flags);
+			if (priv->can.state < CAN_STATE_BUS_OFF)
+				out_8(&regs->canrier, priv->shadow_canrier);
+		}
 	}
-	return ret;
+	return work_done;
 }
 
 static irqreturn_t mscan_isr(int irq, void *dev_id)
@@ -918,7 +918,7 @@ static int gs_usb_probe(struct usb_interface *intf,
 			     GS_USB_BREQ_HOST_FORMAT,
 			     USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
 			     1,
-			     intf->altsetting[0].desc.bInterfaceNumber,
+			     intf->cur_altsetting->desc.bInterfaceNumber,
 			     hconf,
 			     sizeof(*hconf),
 			     1000);
@@ -941,7 +941,7 @@ static int gs_usb_probe(struct usb_interface *intf,
 			     GS_USB_BREQ_DEVICE_CONFIG,
 			     USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
 			     1,
-			     intf->altsetting[0].desc.bInterfaceNumber,
+			     intf->cur_altsetting->desc.bInterfaceNumber,
 			     dconf,
 			     sizeof(*dconf),
 			     1000);
@@ -1590,7 +1590,7 @@ static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev)
 	struct usb_endpoint_descriptor *ep;
 	int i;
 
-	iface_desc = &dev->intf->altsetting[0];
+	iface_desc = dev->intf->cur_altsetting;
 
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		ep = &iface_desc->endpoint[i].desc;
@@ -1310,7 +1310,7 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev)
 	struct usb_endpoint_descriptor *endpoint;
 	int i;
 
-	iface_desc = &dev->intf->altsetting[0];
+	iface_desc = dev->intf->cur_altsetting;
 
 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
 		endpoint = &iface_desc->endpoint[i].desc;
@@ -360,6 +360,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
 {
 	u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
 
+	/* Use the default high priority for management frames sent to
+	 * the CPU.
+	 */
+	port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
 	return mv88e6390_g1_monitor_write(chip, ptr, port);
 }
 
@@ -211,6 +211,7 @@
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST	0x2000
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST	0x2100
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST	0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI	0x00e0
 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK	0x00ff
 
 /* Offset 0x1C: Global Control 2 */