commit c0cc53162a
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Minor overlapping changes in the conflicts. In the macsec case, the change of
the default ID macro name overlapped with the 64-bit netlink attribute
alignment fixes in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
	    can be one of:
		"allwinner,sun6i-a31"
		"allwinner,sun8i-a23"
		"arm,psci"
		"arm,realview-smp"
		"brcm,bcm-nsp-smp"
		"brcm,brahma-b15"
@@ -8,15 +8,19 @@ Required properties:
	of memory mapped region.
- clock-names: from common clock binding:
	Required elements: "24m"
- rockchip,grf: phandle to the syscon managing the "general register files"
- #phy-cells : from the generic PHY bindings, must be 0;

Example:

edp_phy: edp-phy {
	compatible = "rockchip,rk3288-dp-phy";
	rockchip,grf = <&grf>;
	clocks = <&cru SCLK_EDP_24M>;
	clock-names = "24m";
	#phy-cells = <0>;
grf: syscon@ff770000 {
	compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";

	...

	edp_phy: edp-phy {
		compatible = "rockchip,rk3288-dp-phy";
		clocks = <&cru SCLK_EDP_24M>;
		clock-names = "24m";
		#phy-cells = <0>;
	};
};
@@ -3,17 +3,23 @@ Rockchip EMMC PHY

Required properties:
 - compatible: rockchip,rk3399-emmc-phy
 - rockchip,grf : phandle to the syscon managing the "general
   register files"
 - #phy-cells: must be 0
 - reg: PHY configure reg address offset in "general
 - reg: PHY register address offset and length in "general
   register files"

Example:

emmcphy: phy {
	compatible = "rockchip,rk3399-emmc-phy";
	rockchip,grf = <&grf>;
	reg = <0xf780>;
	#phy-cells = <0>;

grf: syscon@ff770000 {
	compatible = "rockchip,rk3399-grf", "syscon", "simple-mfd";
	#address-cells = <1>;
	#size-cells = <1>;

	...

	emmcphy: phy@f780 {
		compatible = "rockchip,rk3399-emmc-phy";
		reg = <0xf780 0x20>;
		#phy-cells = <0>;
	};
};
@@ -173,6 +173,10 @@ A few EV_ABS codes have special meanings:
    proximity of the device and while the value of the BTN_TOUCH code is 0. If
    the input device may be used freely in three dimensions, consider ABS_Z
    instead.
- BTN_TOOL_<name> should be set to 1 when the tool comes into detectable
    proximity and set to 0 when the tool leaves detectable proximity.
    BTN_TOOL_<name> signals the type of tool that is currently detected by the
    hardware and is otherwise independent of ABS_DISTANCE and/or BTN_TOUCH.

* ABS_MT_<name>:
  - Used to describe multitouch input events. Please see
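The proximity rules above map directly onto the input core's reporting helpers.
As a minimal illustrative sketch (not taken from this commit), a hypothetical
stylus driver could report these codes as follows; report_pen_state() and its
arguments are made-up names, while input_report_key(), input_report_abs() and
input_sync() are the real kernel helpers, and idev is assumed to be an
already-registered struct input_dev with the matching EV_KEY/EV_ABS capabilities:

    #include <linux/input.h>

    static void report_pen_state(struct input_dev *idev, bool in_range,
                                 bool touching, int distance)
    {
            /* BTN_TOOL_PEN: 1 while in detectable proximity, 0 when it leaves. */
            input_report_key(idev, BTN_TOOL_PEN, in_range);

            /* BTN_TOUCH: 1 only while actually touching the surface. */
            input_report_key(idev, BTN_TOUCH, touching);

            /* ABS_DISTANCE is meaningful while hovering, i.e. BTN_TOUCH == 0. */
            if (in_range && !touching)
                    input_report_abs(idev, ABS_DISTANCE, distance);

            input_sync(idev);
    }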
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
... unused hole ...
ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
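(For reference, the corrected size follows from the range bounds above:
0xffffffffff5fffff - 0xffffffffa0000000 + 1 = 0x5f600000 bytes = 1526 * 2^20,
i.e. 1526 MB rather than the previously documented 1525 MB.)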
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
the processes using the page fault handler, with init_level4_pgt as
reference.

Current X86-64 implementations only support 40 bits of address space,
but we support up to 46 bits. This expands into MBZ space in the page tables.
Current X86-64 implementations support up to 46 bits of address space (64 TB),
which is our current limit. This expands into MBZ space in the page tables.

We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
memory window (this size is arbitrary, it can be raised later if needed).
@@ -11072,6 +11072,15 @@ S:	Maintained
F:	drivers/clk/ti/
F:	include/linux/clk/ti.h

TI ETHERNET SWITCH DRIVER (CPSW)
M:	Mugunthan V N <mugunthanvnm@ti.com>
R:	Grygorii Strashko <grygorii.strashko@ti.com>
L:	linux-omap@vger.kernel.org
L:	netdev@vger.kernel.org
S:	Maintained
F:	drivers/net/ethernet/ti/cpsw*
F:	drivers/net/ethernet/ti/davinci*

TI FLASH MEDIA INTERFACE DRIVER
M:	Alex Dubov <oakad@yahoo.com>
S:	Maintained
Makefile | 5
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Blurry Fish Butt

# *DOCUMENTATION*
@@ -1008,7 +1008,8 @@ prepare0: archprepare FORCE
prepare: prepare0 prepare-objtool

ifdef CONFIG_STACK_VALIDATION
  has_libelf := $(shell echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf - &> /dev/null && echo 1 || echo 0)
  has_libelf := $(call try-run,\
		echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
  ifeq ($(has_libelf),1)
    objtool_target := tools/objtool FORCE
  else
@@ -860,7 +860,7 @@
			ti,no-idle-on-init;
			reg = <0x50000000 0x2000>;
			interrupts = <100>;
			dmas = <&edma 52>;
			dmas = <&edma 52 0>;
			dma-names = "rxtx";
			gpmc,num-cs = <7>;
			gpmc,num-waitpins = <2>;
@@ -884,7 +884,7 @@
		gpmc: gpmc@50000000 {
			compatible = "ti,am3352-gpmc";
			ti,hwmods = "gpmc";
			dmas = <&edma 52>;
			dmas = <&edma 52 0>;
			dma-names = "rxtx";
			clocks = <&l3s_gclk>;
			clock-names = "fck";
@@ -99,13 +99,6 @@
		#cooling-cells = <2>;
	};

	extcon_usb1: extcon_usb1 {
		compatible = "linux,extcon-usb-gpio";
		id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
		pinctrl-names = "default";
		pinctrl-0 = <&extcon_usb1_pins>;
	};

	hdmi0: connector {
		compatible = "hdmi-connector";
		label = "hdmi";
@@ -349,12 +342,6 @@
		>;
	};

	extcon_usb1_pins: extcon_usb1_pins {
		pinctrl-single,pins = <
			DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
		>;
	};

	tpd12s015_pins: pinmux_tpd12s015_pins {
		pinctrl-single,pins = <
			DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -706,10 +693,6 @@
	pinctrl-0 = <&usb1_pins>;
};

&omap_dwc3_1 {
	extcon = <&extcon_usb1>;
};

&omap_dwc3_2 {
	extcon = <&extcon_usb2>;
};
@ -4,6 +4,157 @@
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
&pllss {
|
||||
/*
|
||||
* See TRM "2.6.10 Connected outputso DPLLS" and
|
||||
* "2.6.11 Connected Outputs of DPLLJ". Only clkout is
|
||||
* connected except for hdmi and usb.
|
||||
*/
|
||||
adpll_mpu_ck: adpll@40 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-s-clock";
|
||||
reg = <0x40 0x40>;
|
||||
clocks = <&devosc_ck &devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow", "clkinphif";
|
||||
clock-output-names = "481c5040.adpll.dcoclkldo",
|
||||
"481c5040.adpll.clkout",
|
||||
"481c5040.adpll.clkoutx2",
|
||||
"481c5040.adpll.clkouthif";
|
||||
};
|
||||
|
||||
adpll_dsp_ck: adpll@80 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x80 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5080.adpll.dcoclkldo",
|
||||
"481c5080.adpll.clkout",
|
||||
"481c5080.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_sgx_ck: adpll@b0 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0xb0 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c50b0.adpll.dcoclkldo",
|
||||
"481c50b0.adpll.clkout",
|
||||
"481c50b0.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_hdvic_ck: adpll@e0 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0xe0 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c50e0.adpll.dcoclkldo",
|
||||
"481c50e0.adpll.clkout",
|
||||
"481c50e0.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_l3_ck: adpll@110 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x110 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5110.adpll.dcoclkldo",
|
||||
"481c5110.adpll.clkout",
|
||||
"481c5110.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_isp_ck: adpll@140 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x140 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5140.adpll.dcoclkldo",
|
||||
"481c5140.adpll.clkout",
|
||||
"481c5140.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_dss_ck: adpll@170 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x170 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5170.adpll.dcoclkldo",
|
||||
"481c5170.adpll.clkout",
|
||||
"481c5170.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_video0_ck: adpll@1a0 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x1a0 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c51a0.adpll.dcoclkldo",
|
||||
"481c51a0.adpll.clkout",
|
||||
"481c51a0.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_video1_ck: adpll@1d0 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x1d0 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c51d0.adpll.dcoclkldo",
|
||||
"481c51d0.adpll.clkout",
|
||||
"481c51d0.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_hdmi_ck: adpll@200 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x200 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5200.adpll.dcoclkldo",
|
||||
"481c5200.adpll.clkout",
|
||||
"481c5200.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_audio_ck: adpll@230 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x230 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5230.adpll.dcoclkldo",
|
||||
"481c5230.adpll.clkout",
|
||||
"481c5230.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_usb_ck: adpll@260 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x260 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5260.adpll.dcoclkldo",
|
||||
"481c5260.adpll.clkout",
|
||||
"481c5260.adpll.clkoutldo";
|
||||
};
|
||||
|
||||
adpll_ddr_ck: adpll@290 {
|
||||
#clock-cells = <1>;
|
||||
compatible = "ti,dm814-adpll-lj-clock";
|
||||
reg = <0x290 0x30>;
|
||||
clocks = <&devosc_ck &devosc_ck>;
|
||||
clock-names = "clkinp", "clkinpulow";
|
||||
clock-output-names = "481c5290.adpll.dcoclkldo",
|
||||
"481c5290.adpll.clkout",
|
||||
"481c5290.adpll.clkoutldo";
|
||||
};
|
||||
};
|
||||
|
||||
&pllss_clocks {
|
||||
timer1_fck: timer1_fck {
|
||||
#clock-cells = <0>;
|
||||
@ -23,6 +174,24 @@
|
||||
reg = <0x2e0>;
|
||||
};
|
||||
|
||||
/* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */
|
||||
cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,mux-clock";
|
||||
clocks = <&adpll_video0_ck 1
|
||||
&adpll_video1_ck 1
|
||||
&adpll_audio_ck 1>;
|
||||
ti,bit-shift = <1>;
|
||||
reg = <0x2e8>;
|
||||
};
|
||||
|
||||
/* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
|
||||
cpsw_125mhz_gclk: cpsw_125mhz_gclk {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <125000000>;
|
||||
};
|
||||
|
||||
sysclk18_ck: sysclk18_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,mux-clock";
|
||||
@ -79,37 +248,6 @@
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <1000000000>;
|
||||
};
|
||||
|
||||
sysclk4_ck: sysclk4_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <222000000>;
|
||||
};
|
||||
|
||||
sysclk6_ck: sysclk6_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <100000000>;
|
||||
};
|
||||
|
||||
sysclk10_ck: sysclk10_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <48000000>;
|
||||
};
|
||||
|
||||
cpsw_125mhz_gclk: cpsw_125mhz_gclk {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <125000000>;
|
||||
};
|
||||
|
||||
cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <250000000>;
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
&prcm_clocks {
|
||||
@ -138,6 +276,49 @@
|
||||
clock-div = <78125>;
|
||||
};
|
||||
|
||||
/* L4_HS 220 MHz*/
|
||||
sysclk4_ck: sysclk4_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,fixed-factor-clock";
|
||||
clocks = <&adpll_l3_ck 1>;
|
||||
ti,clock-mult = <1>;
|
||||
ti,clock-div = <1>;
|
||||
};
|
||||
|
||||
/* L4_FWCFG */
|
||||
sysclk5_ck: sysclk5_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,fixed-factor-clock";
|
||||
clocks = <&adpll_l3_ck 1>;
|
||||
ti,clock-mult = <1>;
|
||||
ti,clock-div = <2>;
|
||||
};
|
||||
|
||||
/* L4_LS 110 MHz */
|
||||
sysclk6_ck: sysclk6_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,fixed-factor-clock";
|
||||
clocks = <&adpll_l3_ck 1>;
|
||||
ti,clock-mult = <1>;
|
||||
ti,clock-div = <2>;
|
||||
};
|
||||
|
||||
sysclk8_ck: sysclk8_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,fixed-factor-clock";
|
||||
clocks = <&adpll_usb_ck 1>;
|
||||
ti,clock-mult = <1>;
|
||||
ti,clock-div = <1>;
|
||||
};
|
||||
|
||||
sysclk10_ck: sysclk10_ck {
|
||||
compatible = "ti,divider-clock";
|
||||
reg = <0x324>;
|
||||
ti,max-div = <7>;
|
||||
#clock-cells = <0>;
|
||||
clocks = <&adpll_usb_ck 1>;
|
||||
};
|
||||
|
||||
aud_clkin0_ck: aud_clkin0_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
|
@ -6,6 +6,32 @@
|
||||
|
||||
#include "dm814x-clocks.dtsi"
|
||||
|
||||
/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
|
||||
&adpll_hdvic_ck {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
&adpll_l3_ck {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
&adpll_dss_ck {
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
|
||||
&sysclk4_ck {
|
||||
clocks = <&adpll_isp_ck 1>;
|
||||
};
|
||||
|
||||
&sysclk5_ck {
|
||||
clocks = <&adpll_isp_ck 1>;
|
||||
};
|
||||
|
||||
&sysclk6_ck {
|
||||
clocks = <&adpll_isp_ck 1>;
|
||||
};
|
||||
|
||||
/*
|
||||
* Compared to dm814x, dra62x has different shifts and more mux options.
|
||||
* Please add the extra options for ysclk_14 and 16 if really needed.
|
||||
|
@ -98,12 +98,20 @@
|
||||
clock-frequency = <32768>;
|
||||
};
|
||||
|
||||
sys_32k_ck: sys_32k_ck {
|
||||
sys_clk32_crystal_ck: sys_clk32_crystal_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
clock-frequency = <32768>;
|
||||
};
|
||||
|
||||
sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-factor-clock";
|
||||
clocks = <&sys_clkin1>;
|
||||
clock-mult = <1>;
|
||||
clock-div = <610>;
|
||||
};
|
||||
|
||||
virt_12000000_ck: virt_12000000_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-clock";
|
||||
@ -2170,4 +2178,12 @@
|
||||
ti,bit-shift = <22>;
|
||||
reg = <0x0558>;
|
||||
};
|
||||
|
||||
sys_32k_ck: sys_32k_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,mux-clock";
|
||||
clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
|
||||
ti,bit-shift = <8>;
|
||||
reg = <0x6c4>;
|
||||
};
|
||||
};
|
||||
|
@ -1,6 +1,6 @@
|
||||
/dts-v1/;
|
||||
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/interrupt-controller/irq.h>
|
||||
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
|
||||
#include "skeleton.dtsi"
|
||||
|
||||
@ -460,8 +460,6 @@
|
||||
clock-names = "core", "iface";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
|
||||
dma-names = "tx", "rx";
|
||||
};
|
||||
|
||||
spmi_bus: spmi@fc4cf000 {
|
||||
@ -479,16 +477,6 @@
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <4>;
|
||||
};
|
||||
|
||||
blsp2_dma: dma-controller@f9944000 {
|
||||
compatible = "qcom,bam-v1.4.0";
|
||||
reg = <0xf9944000 0x19000>;
|
||||
interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&gcc GCC_BLSP2_AHB_CLK>;
|
||||
clock-names = "bam_clk";
|
||||
#dma-cells = <1>;
|
||||
qcom,ee = <0>;
|
||||
};
|
||||
};
|
||||
|
||||
smd {
|
||||
|
@ -661,6 +661,7 @@
|
||||
};
|
||||
|
||||
&pcie_bus_clk {
|
||||
clock-frequency = <100000000>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
@ -143,19 +143,11 @@
|
||||
};
|
||||
|
||||
&pfc {
|
||||
pinctrl-0 = <&scif_clk_pins>;
|
||||
pinctrl-names = "default";
|
||||
|
||||
scif0_pins: serial0 {
|
||||
renesas,groups = "scif0_data_d";
|
||||
renesas,function = "scif0";
|
||||
};
|
||||
|
||||
scif_clk_pins: scif_clk {
|
||||
renesas,groups = "scif_clk";
|
||||
renesas,function = "scif_clk";
|
||||
};
|
||||
|
||||
ether_pins: ether {
|
||||
renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
|
||||
renesas,function = "eth";
|
||||
@ -229,11 +221,6 @@
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&scif_clk {
|
||||
clock-frequency = <14745600>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&ether {
pinctrl-0 = <&ether_pins &phy1_pins>;
|
||||
pinctrl-names = "default";
|
||||
@ -414,6 +401,7 @@
|
||||
};
|
||||
|
||||
&pcie_bus_clk {
|
||||
clock-frequency = <100000000>;
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
|
@ -1083,9 +1083,8 @@
|
||||
pcie_bus_clk: pcie_bus_clk {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
clock-frequency = <100000000>;
|
||||
clock-frequency = <0>;
|
||||
clock-output-names = "pcie_bus";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* External SCIF clock */
|
||||
@ -1094,7 +1093,6 @@
|
||||
#clock-cells = <0>;
|
||||
/* This value must be overridden by the board. */
|
||||
clock-frequency = <0>;
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* External USB clock - can be overridden by the board */
|
||||
@ -1112,7 +1110,6 @@
|
||||
/* This value must be overridden by the board. */
|
||||
clock-frequency = <0>;
|
||||
clock-output-names = "can_clk";
|
||||
status = "disabled";
|
||||
};
|
||||
|
||||
/* Special CPG clocks */
|
||||
|
@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
|
||||
if (!pdata)
|
||||
pdata = &default_esdhc_pdata;
|
||||
|
||||
return imx_add_platform_device(data->devid, data->id, res,
|
||||
ARRAY_SIZE(res), pdata, sizeof(*pdata));
|
||||
return imx_add_platform_device_dmamask(data->devid, data->id, res,
|
||||
ARRAY_SIZE(res), pdata, sizeof(*pdata),
|
||||
DMA_BIT_MASK(32));
|
||||
}
|
||||
|
@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
|
||||
.cm_inst = DRA7XX_CM_CORE_AON_IPU_INST,
|
||||
.clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
|
||||
.dep_bit = DRA7XX_IPU_STATDEP_SHIFT,
|
||||
.flags = CLKDM_CAN_HWSUP_SWSUP,
|
||||
.flags = CLKDM_CAN_SWSUP,
|
||||
};
|
||||
|
||||
static struct clockdomain mpu1_7xx_clkdm = {
|
||||
|
@ -737,7 +737,8 @@ void __init omap5_init_late(void)
|
||||
#ifdef CONFIG_SOC_DRA7XX
|
||||
void __init dra7xx_init_early(void)
|
||||
{
|
||||
omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
|
||||
omap2_set_globals_tap(DRA7XX_CLASS,
|
||||
OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
|
||||
omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
|
||||
omap2_control_base_init();
|
||||
omap4_pm_init_early();
|
||||
|
@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
|
||||
*/
|
||||
static void irq_save_context(void)
|
||||
{
|
||||
/* DRA7 has no SAR to save */
|
||||
if (soc_is_dra7xx())
|
||||
return;
|
||||
|
||||
if (!sar_base)
|
||||
sar_base = omap4_get_sar_ram_base();
|
||||
|
||||
@ -290,6 +294,9 @@ static void irq_sar_clear(void)
|
||||
{
|
||||
u32 val;
|
||||
u32 offset = SAR_BACKUP_STATUS_OFFSET;
|
||||
/* DRA7 has no SAR to save */
|
||||
if (soc_is_dra7xx())
|
||||
return;
|
||||
|
||||
if (soc_is_omap54xx())
|
||||
offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
|
||||
|
@ -198,7 +198,6 @@ void omap_sram_idle(void)
|
||||
int per_next_state = PWRDM_POWER_ON;
|
||||
int core_next_state = PWRDM_POWER_ON;
|
||||
int per_going_off;
|
||||
int core_prev_state;
|
||||
u32 sdrc_pwr = 0;
|
||||
|
||||
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
|
||||
@ -278,16 +277,20 @@ void omap_sram_idle(void)
|
||||
sdrc_write_reg(sdrc_pwr, SDRC_POWER);
|
||||
|
||||
/* CORE */
|
||||
if (core_next_state < PWRDM_POWER_ON) {
|
||||
core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
|
||||
if (core_prev_state == PWRDM_POWER_OFF) {
|
||||
omap3_core_restore_context();
|
||||
omap3_cm_restore_context();
|
||||
omap3_sram_restore_context();
|
||||
omap2_sms_restore_context();
|
||||
}
|
||||
if (core_next_state < PWRDM_POWER_ON &&
|
||||
pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
|
||||
omap3_core_restore_context();
|
||||
omap3_cm_restore_context();
|
||||
omap3_sram_restore_context();
|
||||
omap2_sms_restore_context();
|
||||
} else {
|
||||
/*
|
||||
* In off-mode resume path above, omap3_core_restore_context
|
||||
* also handles the INTC autoidle restore done here so limit
|
||||
* this to non-off mode resume paths so we don't do it twice.
|
||||
*/
|
||||
omap3_intc_resume_idle();
|
||||
}
|
||||
omap3_intc_resume_idle();
|
||||
|
||||
pwrdm_post_transition(NULL);
|
||||
|
||||
|
@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
|
||||
void __init shmobile_init_delay(void)
|
||||
{
|
||||
struct device_node *np, *cpus;
|
||||
bool is_a7_a8_a9 = false;
|
||||
bool is_a15 = false;
|
||||
unsigned int div = 0;
|
||||
bool has_arch_timer = false;
|
||||
u32 max_freq = 0;
|
||||
|
||||
@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
|
||||
if (!of_property_read_u32(np, "clock-frequency", &freq))
|
||||
max_freq = max(max_freq, freq);
|
||||
|
||||
if (of_device_is_compatible(np, "arm,cortex-a8") ||
|
||||
of_device_is_compatible(np, "arm,cortex-a9")) {
|
||||
is_a7_a8_a9 = true;
|
||||
} else if (of_device_is_compatible(np, "arm,cortex-a7")) {
|
||||
is_a7_a8_a9 = true;
|
||||
has_arch_timer = true;
|
||||
} else if (of_device_is_compatible(np, "arm,cortex-a15")) {
|
||||
is_a15 = true;
|
||||
if (of_device_is_compatible(np, "arm,cortex-a8")) {
|
||||
div = 2;
|
||||
} else if (of_device_is_compatible(np, "arm,cortex-a9")) {
|
||||
div = 1;
|
||||
} else if (of_device_is_compatible(np, "arm,cortex-a7") ||
|
||||
of_device_is_compatible(np, "arm,cortex-a15")) {
|
||||
div = 1;
|
||||
has_arch_timer = true;
|
||||
}
|
||||
}
|
||||
|
||||
of_node_put(cpus);
|
||||
|
||||
if (!max_freq)
|
||||
if (!max_freq || !div)
|
||||
return;
|
||||
|
||||
if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
|
||||
if (is_a7_a8_a9)
|
||||
shmobile_setup_delay_hz(max_freq, 1, 3);
|
||||
else if (is_a15)
|
||||
shmobile_setup_delay_hz(max_freq, 2, 4);
|
||||
}
|
||||
if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
|
||||
shmobile_setup_delay_hz(max_freq, 1, div);
|
||||
}
|
||||
|
@ -70,7 +70,6 @@
|
||||
i2c3 = &i2c3;
|
||||
i2c4 = &i2c4;
|
||||
i2c5 = &i2c5;
|
||||
i2c6 = &i2c6;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -201,15 +201,12 @@
|
||||
|
||||
i2c2: i2c@58782000 {
|
||||
compatible = "socionext,uniphier-fi2c";
|
||||
status = "disabled";
|
||||
reg = <0x58782000 0x80>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
interrupts = <0 43 4>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_i2c2>;
|
||||
clocks = <&i2c_clk>;
|
||||
clock-frequency = <100000>;
|
||||
clock-frequency = <400000>;
|
||||
};
|
||||
|
||||
i2c3: i2c@58783000 {
|
||||
@ -227,12 +224,15 @@
|
||||
|
||||
i2c4: i2c@58784000 {
|
||||
compatible = "socionext,uniphier-fi2c";
|
||||
status = "disabled";
|
||||
reg = <0x58784000 0x80>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
interrupts = <0 45 4>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_i2c4>;
|
||||
clocks = <&i2c_clk>;
|
||||
clock-frequency = <400000>;
|
||||
clock-frequency = <100000>;
|
||||
};
|
||||
|
||||
i2c5: i2c@58785000 {
|
||||
@ -245,16 +245,6 @@
|
||||
clock-frequency = <400000>;
|
||||
};
|
||||
|
||||
i2c6: i2c@58786000 {
|
||||
compatible = "socionext,uniphier-fi2c";
|
||||
reg = <0x58786000 0x80>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
interrupts = <0 26 4>;
|
||||
clocks = <&i2c_clk>;
|
||||
clock-frequency = <400000>;
|
||||
};
|
||||
|
||||
system_bus: system-bus@58c00000 {
|
||||
compatible = "socionext,uniphier-system-bus";
|
||||
status = "disabled";
|
||||
|
@ -588,6 +588,15 @@ set_hcr:
|
||||
msr vpidr_el2, x0
|
||||
msr vmpidr_el2, x1
|
||||
|
||||
/*
|
||||
* When VHE is not in use, early init of EL2 and EL1 needs to be
|
||||
* done here.
|
||||
* When VHE _is_ in use, EL1 will not be used in the host and
|
||||
* requires no configuration, and all non-hyp-specific EL2 setup
|
||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
||||
*/
|
||||
cbnz x2, 1f
|
||||
|
||||
/* sctlr_el1 */
|
||||
mov x0, #0x0800 // Set/clear RES{1,0} bits
|
||||
CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
|
||||
@ -597,6 +606,7 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
|
||||
/* Coprocessor traps. */
|
||||
mov x0, #0x33ff
|
||||
msr cptr_el2, x0 // Disable copro. traps to EL2
|
||||
1:
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
msr hstr_el2, xzr // Disable CP15 traps to EL2
|
||||
@ -734,7 +744,8 @@ ENDPROC(__secondary_switched)
|
||||
|
||||
.macro update_early_cpu_boot_status status, tmp1, tmp2
|
||||
mov \tmp2, #\status
|
||||
str_l \tmp2, __early_cpu_boot_status, \tmp1
|
||||
adr_l \tmp1, __early_cpu_boot_status
|
||||
str \tmp2, [\tmp1]
|
||||
dmb sy
|
||||
dc ivac, \tmp1 // Invalidate potentially stale cache line
|
||||
.endm
|
||||
|
@ -52,6 +52,7 @@ static void write_pen_release(u64 val)
|
||||
static int smp_spin_table_cpu_init(unsigned int cpu)
|
||||
{
|
||||
struct device_node *dn;
|
||||
int ret;
|
||||
|
||||
dn = of_get_cpu_node(cpu, NULL);
|
||||
if (!dn)
|
||||
@ -60,15 +61,15 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
|
||||
/*
|
||||
* Determine the address from which the CPU is polling.
|
||||
*/
|
||||
if (of_property_read_u64(dn, "cpu-release-addr",
|
||||
&cpu_release_addr[cpu])) {
|
||||
ret = of_property_read_u64(dn, "cpu-release-addr",
|
||||
&cpu_release_addr[cpu]);
|
||||
if (ret)
|
||||
pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
|
||||
cpu);
|
||||
|
||||
return -1;
|
||||
}
|
||||
of_node_put(dn);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int smp_spin_table_cpu_prepare(unsigned int cpu)
|
||||
|
@ -31,6 +31,7 @@
|
||||
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
|
||||
0x00000040
|
||||
|
||||
/* Reserved - do not use 0x00000004 */
|
||||
#define PPC_FEATURE_TRUE_LE 0x00000002
|
||||
#define PPC_FEATURE_PPC_LE 0x00000001
|
||||
|
||||
|
@ -148,23 +148,25 @@ static struct ibm_pa_feature {
|
||||
unsigned long cpu_features; /* CPU_FTR_xxx bit */
|
||||
unsigned long mmu_features; /* MMU_FTR_xxx bit */
|
||||
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
|
||||
unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
|
||||
unsigned char pabyte; /* byte number in ibm,pa-features */
|
||||
unsigned char pabit; /* bit number (big-endian) */
|
||||
unsigned char invert; /* if 1, pa bit set => clear feature */
|
||||
} ibm_pa_features[] __initdata = {
|
||||
{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
|
||||
{0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
|
||||
{CPU_FTR_CTRL, 0, 0, 0, 3, 0},
|
||||
{CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
|
||||
{CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
|
||||
{0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
|
||||
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
|
||||
{0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
|
||||
{0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
|
||||
{CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
|
||||
{CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
|
||||
{CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
|
||||
{0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
|
||||
{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
|
||||
/*
|
||||
* If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
|
||||
* we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
|
||||
* which is 0 if the kernel doesn't support TM.
|
||||
* If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
|
||||
* we don't want to turn on TM here, so we use the *_COMP versions
|
||||
* which are 0 if the kernel doesn't support TM.
|
||||
*/
|
||||
{CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
|
||||
{CPU_FTR_TM_COMP, 0, 0,
|
||||
PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
|
||||
};
|
||||
|
||||
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
|
||||
@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
|
||||
if (bit ^ fp->invert) {
|
||||
cur_cpu_spec->cpu_features |= fp->cpu_features;
|
||||
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
|
||||
cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
|
||||
cur_cpu_spec->mmu_features |= fp->mmu_features;
|
||||
} else {
|
||||
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
|
||||
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
|
||||
cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
|
||||
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
|
||||
}
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <asm/page.h>
|
||||
#include <asm-generic/hugetlb.h>
|
||||
|
||||
#define hugepages_supported() cpu_has_pse
|
||||
|
||||
static inline int is_hugepage_only_range(struct mm_struct *mm,
|
||||
unsigned long addr,
|
||||
|
@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static unsigned char hv_get_nmi_reason(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init ms_hyperv_init_platform(void)
|
||||
{
|
||||
/*
|
||||
@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
|
||||
machine_ops.crash_shutdown = hv_machine_crash_shutdown;
|
||||
#endif
|
||||
mark_tsc_unstable("running on Hyper-V");
|
||||
|
||||
/*
|
||||
* Generation 2 instances don't support reading the NMI status from
|
||||
* 0x61 port.
|
||||
*/
|
||||
if (efi_enabled(EFI_BOOT))
|
||||
x86_platform.get_nmi_reason = hv_get_nmi_reason;
|
||||
}
|
||||
|
||||
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
|
||||
|
@ -42,7 +42,7 @@ static void __init tango_clocksource_init(struct device_node *np)
|
||||
|
||||
ret = clocksource_mmio_init(xtal_in_cnt, "tango-xtal", xtal_freq, 350,
|
||||
32, clocksource_mmio_readl_up);
|
||||
if (!ret) {
|
||||
if (ret) {
|
||||
pr_err("%s: registration failed\n", np->full_name);
|
||||
return;
|
||||
}
|
||||
|
@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
|
||||
ptr->eptr = upper_32_bits(dma_addr);
|
||||
}
|
||||
|
||||
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
|
||||
struct talitos_ptr *src_ptr, bool is_sec1)
|
||||
{
|
||||
dst_ptr->ptr = src_ptr->ptr;
|
||||
if (!is_sec1)
|
||||
dst_ptr->eptr = src_ptr->eptr;
|
||||
}
|
||||
|
||||
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
|
||||
bool is_sec1)
|
||||
{
|
||||
@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
|
||||
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
|
||||
: DMA_TO_DEVICE);
|
||||
|
||||
/* hmac data */
|
||||
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
|
||||
if (sg_count > 1 &&
|
||||
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
|
||||
areq->assoclen,
|
||||
&edesc->link_tbl[tbl_off])) > 1) {
|
||||
tbl_off += ret;
|
||||
|
||||
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
|
||||
sizeof(struct talitos_ptr), 0);
|
||||
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
|
||||
|
||||
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
|
||||
edesc->dma_len, DMA_BIDIRECTIONAL);
|
||||
|
||||
tbl_off += ret;
|
||||
} else {
|
||||
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
|
||||
desc->ptr[1].j_extent = 0;
|
||||
@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
|
||||
sg_link_tbl_len += authsize;
|
||||
|
||||
if (sg_count > 1 &&
|
||||
(ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
|
||||
sg_link_tbl_len,
|
||||
&edesc->link_tbl[tbl_off])) > 1) {
|
||||
tbl_off += ret;
|
||||
if (sg_count == 1) {
|
||||
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
|
||||
areq->assoclen, 0);
|
||||
} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
|
||||
areq->assoclen, sg_link_tbl_len,
|
||||
&edesc->link_tbl[tbl_off])) >
|
||||
1) {
|
||||
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
|
||||
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
|
||||
tbl_off *
|
||||
@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
|
||||
edesc->dma_len,
|
||||
DMA_BIDIRECTIONAL);
|
||||
} else
|
||||
to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
|
||||
tbl_off += ret;
|
||||
} else {
|
||||
copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
|
||||
}
|
||||
|
||||
/* cipher out */
|
||||
desc->ptr[5].len = cpu_to_be16(cryptlen);
|
||||
@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
|
||||
edesc->icv_ool = false;
|
||||
|
||||
if (sg_count > 1 &&
|
||||
(sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
|
||||
if (sg_count == 1) {
|
||||
to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
|
||||
areq->assoclen, 0);
|
||||
} else if ((sg_count =
|
||||
sg_to_link_tbl_offset(areq->dst, sg_count,
|
||||
areq->assoclen, cryptlen,
|
||||
&edesc->link_tbl[tbl_off])) >
|
||||
1) {
|
||||
&edesc->link_tbl[tbl_off])) > 1) {
|
||||
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
|
||||
|
||||
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
|
||||
@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
|
||||
edesc->dma_len, DMA_BIDIRECTIONAL);
|
||||
|
||||
edesc->icv_ool = true;
|
||||
} else
|
||||
to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
|
||||
} else {
|
||||
copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
|
||||
}
|
||||
|
||||
/* iv out */
|
||||
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
|
||||
@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
|
||||
struct talitos_alg_template algt;
|
||||
};
|
||||
|
||||
static int talitos_cra_init(struct crypto_tfm *tfm)
|
||||
static int talitos_init_common(struct talitos_ctx *ctx,
|
||||
struct talitos_crypto_alg *talitos_alg)
|
||||
{
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
struct talitos_crypto_alg *talitos_alg;
|
||||
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
struct talitos_private *priv;
|
||||
|
||||
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
|
||||
talitos_alg = container_of(__crypto_ahash_alg(alg),
|
||||
struct talitos_crypto_alg,
|
||||
algt.alg.hash);
|
||||
else
|
||||
talitos_alg = container_of(alg, struct talitos_crypto_alg,
|
||||
algt.alg.crypto);
|
||||
|
||||
/* update context with ptr to dev */
|
||||
ctx->dev = talitos_alg->dev;
|
||||
|
||||
@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int talitos_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *alg = tfm->__crt_alg;
|
||||
struct talitos_crypto_alg *talitos_alg;
|
||||
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
|
||||
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
|
||||
talitos_alg = container_of(__crypto_ahash_alg(alg),
|
||||
struct talitos_crypto_alg,
|
||||
algt.alg.hash);
|
||||
else
|
||||
talitos_alg = container_of(alg, struct talitos_crypto_alg,
|
||||
algt.alg.crypto);
|
||||
|
||||
return talitos_init_common(ctx, talitos_alg);
|
||||
}
|
||||
|
||||
static int talitos_cra_init_aead(struct crypto_aead *tfm)
|
||||
{
|
||||
talitos_cra_init(crypto_aead_tfm(tfm));
|
||||
return 0;
|
||||
struct aead_alg *alg = crypto_aead_alg(tfm);
|
||||
struct talitos_crypto_alg *talitos_alg;
|
||||
struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
|
||||
talitos_alg = container_of(alg, struct talitos_crypto_alg,
|
||||
algt.alg.aead);
|
||||
|
||||
return talitos_init_common(ctx, talitos_alg);
|
||||
}
|
||||
|
||||
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
|
||||
|
@ -362,6 +362,7 @@ struct sbridge_pvt {
|
||||
|
||||
/* Memory type detection */
|
||||
bool is_mirrored, is_lockstep, is_close_pg;
|
||||
bool is_chan_hash;
|
||||
|
||||
/* Fifo double buffers */
|
||||
struct mce mce_entry[MCE_LOG_LEN];
|
||||
@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
|
||||
return (pkg >> 2) & 0x1;
|
||||
}
|
||||
|
||||
static int haswell_chan_hash(int idx, u64 addr)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* XOR even bits from 12:26 to bit0 of idx,
|
||||
* odd bits from 13:27 to bit1
|
||||
*/
|
||||
for (i = 12; i < 28; i += 2)
|
||||
idx ^= (addr >> i) & 3;
|
||||
|
||||
return idx;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
Memory check routines
|
||||
****************************************************************************/
|
||||
@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
|
||||
KNL_MAX_CHANNELS : NUM_CHANNELS;
|
||||
u64 knl_mc_sizes[KNL_MAX_CHANNELS];
|
||||
|
||||
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
|
||||
pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, ®);
|
||||
pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
|
||||
}
|
||||
if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
|
||||
pvt->info.type == KNIGHTS_LANDING)
|
||||
pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, ®);
|
||||
@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
|
||||
}
|
||||
|
||||
ch_way = TAD_CH(reg) + 1;
|
||||
sck_way = 1 << TAD_SOCK(reg);
|
||||
sck_way = TAD_SOCK(reg);
|
||||
|
||||
if (ch_way == 3)
|
||||
idx = addr >> 6;
|
||||
else
|
||||
else {
|
||||
idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
|
||||
if (pvt->is_chan_hash)
|
||||
idx = haswell_chan_hash(idx, addr);
|
||||
}
|
||||
idx = idx % ch_way;
|
||||
|
||||
/*
|
||||
@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
|
||||
switch(ch_way) {
|
||||
case 2:
|
||||
case 4:
|
||||
sck_xch = 1 << sck_way * (ch_way >> 1);
|
||||
sck_xch = (1 << sck_way) * (ch_way >> 1);
|
||||
break;
|
||||
default:
|
||||
sprintf(msg, "Invalid mirror set. Can't decode addr");
|
||||
@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
|
||||
|
||||
ch_addr = addr - offset;
|
||||
ch_addr >>= (6 + shiftup);
|
||||
ch_addr /= ch_way * sck_way;
|
||||
ch_addr /= sck_xch;
|
||||
ch_addr <<= (6 + shiftup);
|
||||
ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
|
||||
|
||||
|
@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
|
||||
.init = psci_dt_cpu_init_idle,
|
||||
};
|
||||
|
||||
CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
|
||||
CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
|
||||
#endif
|
||||
#endif
|
||||
|
||||
|
@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
|
||||
struct amdgpu_bo *vcpu_bo;
|
||||
void *cpu_addr;
|
||||
uint64_t gpu_addr;
|
||||
unsigned fw_version;
|
||||
void *saved_bo;
|
||||
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
|
||||
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
|
||||
|
@ -425,6 +425,10 @@ static int acp_resume(void *handle)
|
||||
struct acp_pm_domain *apd;
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
|
||||
/* return early if no ACP */
|
||||
if (!adev->acp.acp_genpd)
|
||||
return 0;
|
||||
|
||||
/* SMU block will power on ACP irrespective of ACP runtime status.
|
||||
* Power off explicitly based on genpd ACP runtime status so that ACP
|
||||
* hw and ACP-genpd status are in sync.
|
||||
|
@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
|
||||
fw_info.feature = adev->vce.fb_version;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_UVD:
|
||||
fw_info.ver = 0;
|
||||
fw_info.ver = adev->uvd.fw_version;
|
||||
fw_info.feature = 0;
|
||||
break;
|
||||
case AMDGPU_INFO_FW_GMC:
|
||||
|
@ -53,7 +53,7 @@ struct amdgpu_hpd;
|
||||
|
||||
#define AMDGPU_MAX_HPD_PINS 6
|
||||
#define AMDGPU_MAX_CRTCS 6
|
||||
#define AMDGPU_MAX_AFMT_BLOCKS 7
|
||||
#define AMDGPU_MAX_AFMT_BLOCKS 9
|
||||
|
||||
enum amdgpu_rmx_type {
|
||||
RMX_OFF,
|
||||
@ -309,8 +309,8 @@ struct amdgpu_mode_info {
|
||||
struct atom_context *atom_context;
|
||||
struct card_info *atom_card_info;
|
||||
bool mode_config_initialized;
|
||||
struct amdgpu_crtc *crtcs[6];
|
||||
struct amdgpu_afmt *afmt[7];
|
||||
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
|
||||
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
|
||||
/* DVI-I properties */
|
||||
struct drm_property *coherent_mode_property;
|
||||
/* DAC enable load detect */
|
||||
|
@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
|
||||
{
|
||||
struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
|
||||
|
||||
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
|
||||
return -EPERM;
|
||||
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
|
||||
}
|
||||
|
||||
|
@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
|
||||
DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
|
||||
version_major, version_minor, family_id);
|
||||
|
||||
adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
|
||||
(family_id << 8));
|
||||
|
||||
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
|
||||
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
|
||||
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
|
||||
@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
|
||||
if (i == AMDGPU_MAX_UVD_HANDLES)
|
||||
return 0;
|
||||
|
||||
cancel_delayed_work_sync(&adev->uvd.idle_work);
|
||||
|
||||
size = amdgpu_bo_size(adev->uvd.vcpu_bo);
|
||||
ptr = adev->uvd.cpu_addr;
|
||||
|
||||
|
@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
|
||||
if (i == AMDGPU_MAX_VCE_HANDLES)
|
||||
return 0;
|
||||
|
||||
cancel_delayed_work_sync(&adev->vce.idle_work);
|
||||
/* TODO: suspending running encoding sessions isn't supported */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
|
||||
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
|
||||
int i;
|
||||
|
||||
port = drm_dp_get_validated_port_ref(mgr, port);
|
||||
if (!port)
|
||||
return -EINVAL;
|
||||
|
||||
port_num = port->port_num;
|
||||
mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
|
||||
if (!mstb) {
|
||||
mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
|
||||
|
||||
if (!mstb)
|
||||
if (!mstb) {
|
||||
drm_dp_put_port(port);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
|
||||
@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
|
||||
kfree(txmsg);
|
||||
fail_put:
|
||||
drm_dp_put_mst_branch_device(mstb);
|
||||
drm_dp_put_port(port);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
|
||||
|
||||
/* WaRsDisableCoarsePowerGating:skl,bxt */
|
||||
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
|
||||
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
|
||||
IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
|
||||
IS_SKL_GT3(dev) || \
|
||||
IS_SKL_GT4(dev))
|
||||
|
||||
/*
|
||||
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
|
||||
* even when in MSI mode. This results in spurious interrupt warnings if the
|
||||
|
@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
||||
if (pvec != NULL) {
|
||||
struct mm_struct *mm = obj->userptr.mm->mm;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
while (pinned < npages) {
|
||||
ret = get_user_pages_remote(work->task, mm,
|
||||
obj->userptr.ptr + pinned * PAGE_SIZE,
|
||||
npages - pinned,
|
||||
!obj->userptr.read_only, 0,
|
||||
pvec + pinned, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
ret = -EFAULT;
|
||||
if (atomic_inc_not_zero(&mm->mm_users)) {
|
||||
down_read(&mm->mmap_sem);
|
||||
while (pinned < npages) {
|
||||
ret = get_user_pages_remote
|
||||
(work->task, mm,
|
||||
obj->userptr.ptr + pinned * PAGE_SIZE,
|
||||
npages - pinned,
|
||||
!obj->userptr.read_only, 0,
|
||||
pvec + pinned, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
pinned += ret;
|
||||
pinned += ret;
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
mmput(mm);
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
}
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
@ -841,11 +841,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
|
||||
if (unlikely(total_bytes > remain_usable)) {
|
||||
/*
|
||||
* The base request will fit but the reserved space
|
||||
* falls off the end. So only need to to wait for the
|
||||
* reserved size after flushing out the remainder.
|
||||
* falls off the end. So don't need an immediate wrap
|
||||
* and only need to effectively wait for the reserved
|
||||
* size space from the start of ringbuffer.
|
||||
*/
|
||||
wait_bytes = remain_actual + ringbuf->reserved_size;
|
||||
need_wrap = true;
|
||||
} else if (total_bytes > ringbuf->space) {
|
||||
/* No wrapping required, just waiting. */
|
||||
wait_bytes = total_bytes;
|
||||
@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
|
||||
struct intel_ringbuffer *ringbuf = request->ringbuf;
|
||||
int ret;
|
||||
|
||||
ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
|
||||
ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* We're using qword write, seqno should be aligned to 8 bytes. */
|
||||
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
|
||||
|
||||
/* w/a for post sync ops following a GPGPU operation we
|
||||
* need a prior CS_STALL, which is emitted by the flush
|
||||
* following the batch.
|
||||
*/
|
||||
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
|
||||
intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
|
||||
intel_logical_ring_emit(ringbuf,
|
||||
(PIPE_CONTROL_GLOBAL_GTT_IVB |
|
||||
PIPE_CONTROL_CS_STALL |
|
||||
@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
|
||||
intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
|
||||
intel_logical_ring_emit(ringbuf, 0);
|
||||
intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
|
||||
/* We're thrashing one dword of HWS. */
|
||||
intel_logical_ring_emit(ringbuf, 0);
|
||||
intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
|
||||
intel_logical_ring_emit(ringbuf, MI_NOOP);
|
||||
return intel_logical_ring_advance_and_submit(request);
|
||||
}
|
||||
|
||||
|
@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
|
||||
const struct drm_plane_state *pstate,
|
||||
int y)
|
||||
{
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
|
||||
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
|
||||
struct drm_framebuffer *fb = pstate->fb;
|
||||
uint32_t width = 0, height = 0;
|
||||
|
||||
width = drm_rect_width(&intel_pstate->src) >> 16;
|
||||
height = drm_rect_height(&intel_pstate->src) >> 16;
|
||||
|
||||
if (intel_rotation_90_or_270(pstate->rotation))
|
||||
swap(width, height);
|
||||
|
||||
/* for planar format */
|
||||
if (fb->pixel_format == DRM_FORMAT_NV12) {
|
||||
if (y) /* y-plane data rate */
|
||||
return intel_crtc->config->pipe_src_w *
|
||||
intel_crtc->config->pipe_src_h *
|
||||
return width * height *
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
else /* uv-plane data rate */
|
||||
return (intel_crtc->config->pipe_src_w/2) *
|
||||
(intel_crtc->config->pipe_src_h/2) *
|
||||
return (width / 2) * (height / 2) *
|
||||
drm_format_plane_cpp(fb->pixel_format, 1);
|
||||
}
|
||||
|
||||
/* for packed formats */
|
||||
return intel_crtc->config->pipe_src_w *
|
||||
intel_crtc->config->pipe_src_h *
|
||||
drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
|
||||
struct drm_framebuffer *fb = plane->state->fb;
|
||||
int id = skl_wm_plane_id(intel_plane);
|
||||
|
||||
if (fb == NULL)
|
||||
if (!to_intel_plane_state(plane->state)->visible)
|
||||
continue;
|
||||
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
continue;
|
||||
|
||||
@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
|
||||
uint16_t plane_blocks, y_plane_blocks = 0;
|
||||
int id = skl_wm_plane_id(intel_plane);
|
||||
|
||||
if (pstate->fb == NULL)
|
||||
if (!to_intel_plane_state(pstate)->visible)
|
||||
continue;
|
||||
if (plane->type == DRM_PLANE_TYPE_CURSOR)
|
||||
continue;
|
||||
@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
|
||||
{
|
||||
struct drm_plane *plane = &intel_plane->base;
|
||||
struct drm_framebuffer *fb = plane->state->fb;
|
||||
struct intel_plane_state *intel_pstate =
|
||||
to_intel_plane_state(plane->state);
|
||||
uint32_t latency = dev_priv->wm.skl_latency[level];
|
||||
uint32_t method1, method2;
|
||||
uint32_t plane_bytes_per_line, plane_blocks_per_line;
|
||||
uint32_t res_blocks, res_lines;
|
||||
uint32_t selected_result;
|
||||
uint8_t cpp;
|
||||
uint32_t width = 0, height = 0;
|
||||
|
||||
if (latency == 0 || !cstate->base.active || !fb)
|
||||
if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
|
||||
return false;
|
||||
|
||||
width = drm_rect_width(&intel_pstate->src) >> 16;
|
||||
height = drm_rect_height(&intel_pstate->src) >> 16;
|
||||
|
||||
if (intel_rotation_90_or_270(plane->state->rotation))
|
||||
swap(width, height);
|
||||
|
||||
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
|
||||
cpp, latency);
|
||||
method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
|
||||
cstate->base.adjusted_mode.crtc_htotal,
|
||||
cstate->pipe_src_w,
|
||||
cpp, fb->modifier[0],
|
||||
width,
|
||||
cpp,
|
||||
fb->modifier[0],
|
||||
latency);
|
||||
|
||||
plane_bytes_per_line = cstate->pipe_src_w * cpp;
|
||||
plane_bytes_per_line = width * cpp;
|
||||
plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
|
||||
|
||||
if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
|
||||
|
@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
|
||||
|
||||
/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
|
||||
tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
|
||||
if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
|
||||
if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
|
||||
IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
|
||||
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
|
||||
@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
|
||||
WA_SET_BIT_MASKED(HIZ_CHICKEN,
|
||||
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
|
||||
|
||||
if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
|
||||
/* This is tied to WaForceContextSaveRestoreNonCoherent */
|
||||
if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
|
||||
/*
|
||||
*Use Force Non-Coherent whenever executing a 3D context. This
|
||||
* is a workaround for a possible hang in the unlikely event
|
||||
@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj = ringbuf->obj;
|
||||
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
|
||||
unsigned flags = PIN_OFFSET_BIAS | 4096;
|
||||
int ret;
|
||||
|
||||
if (HAS_LLC(dev_priv) && !obj->stolen) {
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
|
||||
return -ENOMEM;
|
||||
}
|
||||
} else {
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
|
||||
ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
|
||||
flags | PIN_MAPPABLE);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -2454,11 +2458,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
|
||||
if (unlikely(total_bytes > remain_usable)) {
|
||||
/*
|
||||
* The base request will fit but the reserved space
|
||||
* falls off the end. So only need to to wait for the
|
||||
* reserved size after flushing out the remainder.
|
||||
* falls off the end. So don't need an immediate wrap
|
||||
* and only need to effectively wait for the reserved
|
||||
* size space from the start of ringbuffer.
|
||||
*/
|
||||
wait_bytes = remain_actual + ringbuf->reserved_size;
|
||||
need_wrap = true;
|
||||
} else if (total_bytes > ringbuf->space) {
|
||||
/* No wrapping required, just waiting. */
|
||||
wait_bytes = total_bytes;
|
||||
|
@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
|
||||
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
dev_priv->uncore.funcs.force_wake_get =
|
||||
fw_domains_get_with_thread_status;
|
||||
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
|
||||
if (IS_HASWELL(dev))
|
||||
dev_priv->uncore.funcs.force_wake_put =
|
||||
fw_domains_put_with_fifo;
|
||||
else
|
||||
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
|
||||
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
|
||||
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
|
||||
} else if (IS_IVYBRIDGE(dev)) {
|
||||
|
@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
|
||||
break;
|
||||
default:
|
||||
if (disp->dithering_mode) {
|
||||
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
|
||||
drm_object_attach_property(&connector->base,
|
||||
disp->dithering_mode,
|
||||
nv_connector->
|
||||
dithering_mode);
|
||||
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
|
||||
}
|
||||
if (disp->dithering_depth) {
|
||||
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
|
||||
drm_object_attach_property(&connector->base,
|
||||
disp->dithering_depth,
|
||||
nv_connector->
|
||||
dithering_depth);
|
||||
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
|
||||
|
||||
gf100_gr_mmio(gr, gr->func->mmio);
|
||||
|
||||
nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
|
||||
|
||||
memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
|
||||
for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
|
||||
do {
|
||||
|
@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
return radeon_atpx_priv.atpx_detected;
}

bool radeon_has_atpx_dgpu_power_cntl(void) {
return radeon_atpx_priv.atpx.functions.power_cntl;
}

/**
* radeon_atpx_call - call an ATPX method
*
@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
*/
static int radeon_atpx_validate(struct radeon_atpx *atpx)
{
/* make sure required functions are enabled */
/* dGPU power control is required */
if (atpx->functions.power_cntl == false) {
printk("ATPX dGPU power cntl not present, forcing\n");
atpx->functions.power_cntl = true;
}

if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;

@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||
rdev->mode_info.dither_property,
|
||||
RADEON_FMT_DITHER_DISABLE);
|
||||
|
||||
if (radeon_audio != 0)
|
||||
if (radeon_audio != 0) {
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
rdev->mode_info.audio_property,
|
||||
RADEON_AUDIO_AUTO);
|
||||
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||
}
|
||||
if (ASIC_IS_DCE5(rdev))
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
rdev->mode_info.output_csc_property,
|
||||
@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
rdev->mode_info.audio_property,
|
||||
RADEON_AUDIO_AUTO);
|
||||
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||
}
|
||||
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
|
||||
radeon_connector->dac_load_detect = true;
|
||||
@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
rdev->mode_info.audio_property,
|
||||
RADEON_AUDIO_AUTO);
|
||||
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||
}
|
||||
if (ASIC_IS_DCE5(rdev))
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
rdev->mode_info.audio_property,
|
||||
RADEON_AUDIO_AUTO);
|
||||
radeon_connector->audio = RADEON_AUDIO_AUTO;
|
||||
}
|
||||
if (ASIC_IS_DCE5(rdev))
|
||||
drm_object_attach_property(&radeon_connector->base.base,
|
||||
|
@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
|
||||
"LAST",
|
||||
};
|
||||
|
||||
#if defined(CONFIG_VGA_SWITCHEROO)
|
||||
bool radeon_has_atpx_dgpu_power_cntl(void);
|
||||
#else
|
||||
static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
|
||||
#endif
|
||||
|
||||
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
|
||||
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
|
||||
|
||||
@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
}
|
||||
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
|
||||
|
||||
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
|
||||
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
|
||||
pdev->subsystem_vendor, pdev->subsystem_device);
|
||||
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
|
||||
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
|
||||
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
|
||||
|
||||
/* mutex initialization are all done here so we
|
||||
* can recall function without having locking issues */
|
||||
@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
* ignore it */
|
||||
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
|
||||
|
||||
if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
|
||||
if (rdev->flags & RADEON_IS_PX)
|
||||
runtime = true;
|
||||
vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
|
||||
if (runtime)
|
||||
|
@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
|
||||
{
|
||||
struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
|
||||
|
||||
if (radeon_ttm_tt_has_userptr(bo->ttm))
|
||||
return -EPERM;
|
||||
return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
|
||||
}
|
||||
|
||||
|
@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
|
||||
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
|
||||
{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
|
||||
{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
|
||||
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
|
||||
{ 0, 0, 0, 0 },
|
||||
};
|
||||
|
||||
|
@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
int max_mtu;
int oper_mtu;
u16 max_mtu;
u16 oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;

@ -153,6 +153,7 @@ static const struct xpad_device {
|
||||
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
|
||||
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
|
||||
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
|
||||
{ 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
|
||||
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
|
||||
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
|
||||
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
|
||||
@ -304,6 +305,7 @@ static struct usb_device_id xpad_table[] = {
|
||||
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
|
||||
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
|
||||
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
|
||||
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
|
||||
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
|
||||
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
|
||||
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
|
||||
|
@ -178,7 +178,6 @@ static int arizona_haptics_probe(struct platform_device *pdev)
|
||||
input_set_drvdata(haptics->input_dev, haptics);
|
||||
|
||||
haptics->input_dev->name = "arizona:haptics";
|
||||
haptics->input_dev->dev.parent = pdev->dev.parent;
|
||||
haptics->input_dev->close = arizona_haptics_close;
|
||||
__set_bit(FF_RUMBLE, haptics->input_dev->ffbit);
|
||||
|
||||
|
@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
kpd_delay = 15625;

if (kpd_delay > 62500 || kpd_delay == 0) {
/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
dev_err(&pdev->dev, "invalid power key trigger delay\n");
return -EINVAL;
}
@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
pwr->name = "pmic8xxx_pwrkey";
pwr->phys = "pmic8xxx_pwrkey/input0";

delay = (kpd_delay << 10) / USEC_PER_SEC;
delay = 1 + ilog2(delay);
delay = (kpd_delay << 6) / USEC_PER_SEC;
delay = ilog2(delay);

err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
if (err < 0) {

@ -222,7 +222,6 @@ static int twl4030_vibra_probe(struct platform_device *pdev)
|
||||
|
||||
info->input_dev->name = "twl4030:vibrator";
|
||||
info->input_dev->id.version = 1;
|
||||
info->input_dev->dev.parent = pdev->dev.parent;
|
||||
info->input_dev->close = twl4030_vibra_close;
|
||||
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
|
||||
|
||||
|
@ -45,7 +45,6 @@
|
||||
struct vibra_info {
|
||||
struct device *dev;
|
||||
struct input_dev *input_dev;
|
||||
struct workqueue_struct *workqueue;
|
||||
struct work_struct play_work;
|
||||
struct mutex mutex;
|
||||
int irq;
|
||||
@ -213,11 +212,7 @@ static int vibra_play(struct input_dev *input, void *data,
|
||||
info->strong_speed = effect->u.rumble.strong_magnitude;
|
||||
info->direction = effect->direction < EFFECT_DIR_180_DEG ? 1 : -1;
|
||||
|
||||
ret = queue_work(info->workqueue, &info->play_work);
|
||||
if (!ret) {
|
||||
dev_info(&input->dev, "work is already on queue\n");
|
||||
return ret;
|
||||
}
|
||||
schedule_work(&info->play_work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -362,7 +357,6 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
|
||||
|
||||
info->input_dev->name = "twl6040:vibrator";
|
||||
info->input_dev->id.version = 1;
|
||||
info->input_dev->dev.parent = pdev->dev.parent;
|
||||
info->input_dev->close = twl6040_vibra_close;
|
||||
__set_bit(FF_RUMBLE, info->input_dev->ffbit);
|
||||
|
||||
|
@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
goto err_free_buf;
}

/* Sanity check that a device has an endpoint */
if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
dev_err(&usbinterface->dev,
"Invalid number of endpoints\n");
error = -EINVAL;
goto err_free_urb;
}

/*
* The endpoint is always altsetting 0, we know this since we know
* this device only has one interrupt endpoint

@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
* HID report descriptor
*/
if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
HID_DEVICE_TYPE, &hid_desc) != 0){
HID_DEVICE_TYPE, &hid_desc) != 0) {
dev_err(&usbinterface->dev,
"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
error = -EIO;

|
||||
struct list_head dev_data_list; /* For global dev_data_list */
|
||||
struct protection_domain *domain; /* Domain the device is bound to */
|
||||
u16 devid; /* PCI Device ID */
|
||||
u16 alias; /* Alias Device ID */
|
||||
bool iommu_v2; /* Device can make use of IOMMUv2 */
|
||||
bool passthrough; /* Device is identity mapped */
|
||||
struct {
|
||||
@ -166,6 +167,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
|
||||
return container_of(dom, struct protection_domain, domain);
|
||||
}
|
||||
|
||||
static inline u16 get_device_id(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
|
||||
return PCI_DEVID(pdev->bus->number, pdev->devfn);
|
||||
}
|
||||
|
||||
static struct iommu_dev_data *alloc_dev_data(u16 devid)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
@ -203,6 +211,68 @@ out_unlock:
|
||||
return dev_data;
|
||||
}
|
||||
|
||||
static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
|
||||
{
|
||||
*(u16 *)data = alias;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u16 get_alias(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
u16 devid, ivrs_alias, pci_alias;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
ivrs_alias = amd_iommu_alias_table[devid];
|
||||
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
|
||||
|
||||
if (ivrs_alias == pci_alias)
|
||||
return ivrs_alias;
|
||||
|
||||
/*
|
||||
* DMA alias showdown
|
||||
*
|
||||
* The IVRS is fairly reliable in telling us about aliases, but it
|
||||
* can't know about every screwy device. If we don't have an IVRS
|
||||
* reported alias, use the PCI reported alias. In that case we may
|
||||
* still need to initialize the rlookup and dev_table entries if the
|
||||
* alias is to a non-existent device.
|
||||
*/
|
||||
if (ivrs_alias == devid) {
|
||||
if (!amd_iommu_rlookup_table[pci_alias]) {
|
||||
amd_iommu_rlookup_table[pci_alias] =
|
||||
amd_iommu_rlookup_table[devid];
|
||||
memcpy(amd_iommu_dev_table[pci_alias].data,
|
||||
amd_iommu_dev_table[devid].data,
|
||||
sizeof(amd_iommu_dev_table[pci_alias].data));
|
||||
}
|
||||
|
||||
return pci_alias;
|
||||
}
|
||||
|
||||
pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
|
||||
"for device %s[%04x:%04x], kernel reported alias "
|
||||
"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
|
||||
PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
|
||||
PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
|
||||
PCI_FUNC(pci_alias));
|
||||
|
||||
/*
|
||||
* If we don't have a PCI DMA alias and the IVRS alias is on the same
|
||||
* bus, then the IVRS table may know about a quirk that we don't.
|
||||
*/
|
||||
if (pci_alias == devid &&
|
||||
PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
|
||||
pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
|
||||
pdev->dma_alias_devfn = ivrs_alias & 0xff;
|
||||
pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
|
||||
PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
|
||||
dev_name(dev));
|
||||
}
|
||||
|
||||
return ivrs_alias;
|
||||
}
|
||||
|
||||
static struct iommu_dev_data *find_dev_data(u16 devid)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
@ -215,13 +285,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
|
||||
return dev_data;
|
||||
}
|
||||
|
||||
static inline u16 get_device_id(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
|
||||
return PCI_DEVID(pdev->bus->number, pdev->devfn);
|
||||
}
|
||||
|
||||
static struct iommu_dev_data *get_dev_data(struct device *dev)
|
||||
{
|
||||
return dev->archdata.iommu;
|
||||
@ -349,6 +412,8 @@ static int iommu_init_device(struct device *dev)
|
||||
if (!dev_data)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_data->alias = get_alias(dev);
|
||||
|
||||
if (pci_iommuv2_capable(pdev)) {
|
||||
struct amd_iommu *iommu;
|
||||
|
||||
@ -369,7 +434,7 @@ static void iommu_ignore_device(struct device *dev)
|
||||
u16 devid, alias;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
alias = amd_iommu_alias_table[devid];
|
||||
alias = get_alias(dev);
|
||||
|
||||
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
|
||||
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
|
||||
@ -1061,7 +1126,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
|
||||
int ret;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
alias = amd_iommu_alias_table[dev_data->devid];
|
||||
alias = dev_data->alias;
|
||||
|
||||
ret = iommu_flush_dte(iommu, dev_data->devid);
|
||||
if (!ret && alias != dev_data->devid)
|
||||
@ -2039,7 +2104,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
|
||||
bool ats;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
alias = amd_iommu_alias_table[dev_data->devid];
|
||||
alias = dev_data->alias;
|
||||
ats = dev_data->ats.enabled;
|
||||
|
||||
/* Update data structures */
|
||||
@ -2073,7 +2138,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
|
||||
return;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
alias = amd_iommu_alias_table[dev_data->devid];
|
||||
alias = dev_data->alias;
|
||||
|
||||
/* decrease reference counters */
|
||||
dev_data->domain->dev_iommu[iommu->index] -= 1;
|
||||
|
@ -826,6 +826,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
|
||||
if (smmu_domain->smmu)
|
||||
goto out_unlock;
|
||||
|
||||
/* We're bypassing these SIDs, so don't allocate an actual context */
|
||||
if (domain->type == IOMMU_DOMAIN_DMA) {
|
||||
smmu_domain->smmu = smmu;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mapping the requested stage onto what we support is surprisingly
|
||||
* complicated, mainly because the spec allows S1+S2 SMMUs without
|
||||
@ -948,7 +954,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
|
||||
void __iomem *cb_base;
|
||||
int irq;
|
||||
|
||||
if (!smmu)
|
||||
if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -1089,18 +1095,20 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
|
||||
struct arm_smmu_device *smmu = smmu_domain->smmu;
|
||||
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
|
||||
|
||||
/*
|
||||
* FIXME: This won't be needed once we have IOMMU-backed DMA ops
|
||||
* for all devices behind the SMMU. Note that we need to take
|
||||
* care configuring SMRs for devices both a platform_device and
|
||||
* and a PCI device (i.e. a PCI host controller)
|
||||
*/
|
||||
if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
|
||||
return 0;
|
||||
|
||||
/* Devices in an IOMMU group may already be configured */
|
||||
ret = arm_smmu_master_configure_smrs(smmu, cfg);
|
||||
if (ret)
|
||||
return ret == -EEXIST ? 0 : ret;
|
||||
|
||||
/*
|
||||
* FIXME: This won't be needed once we have IOMMU-backed DMA ops
|
||||
* for all devices behind the SMMU.
|
||||
*/
|
||||
if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < cfg->num_streamids; ++i) {
|
||||
u32 idx, s2cr;
|
||||
|
||||
|
@ -467,7 +467,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

/* Update the pcpu_masks */
for (i = 0; i < gic_vpes; i++)
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(irq, pcpu_masks[i].pcpu_mask);
set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

@ -707,7 +707,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
gic_map_to_pin(intr, gic_cpu_pin);
gic_map_to_vpe(intr, vpe);
for (i = 0; i < gic_vpes; i++)
for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[vpe].pcpu_mask);
spin_unlock_irqrestore(&gic_lock, flags);

@ -62,9 +62,8 @@ config DUMMY
this device is consigned into oblivion) with a configurable IP
address. It is most commonly used in order to make your currently
inactive SLIP address seem like a real address for local programs.
If you use SLIP or PPP, you might want to say Y here. Since this
thing often comes in handy, the default is Y. It won't enlarge your
kernel either. What a deal. Read about it in the Network
If you use SLIP or PPP, you might want to say Y here. It won't
enlarge your kernel. What a deal. Read about it in the Network
Administrator's Guide, available from
<http://www.tldp.org/docs.html#guide>.

@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
|
||||
u32 packets = 0;
|
||||
u32 bytes = 0;
|
||||
int factor = priv->cqe_factor;
|
||||
u64 timestamp = 0;
|
||||
int done = 0;
|
||||
int budget = priv->tx_work_limit;
|
||||
u32 last_nr_txbb;
|
||||
@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
|
||||
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
|
||||
|
||||
do {
|
||||
u64 timestamp = 0;
|
||||
|
||||
txbbs_skipped += last_nr_txbb;
|
||||
ring_index = (ring_index + last_nr_txbb) & size_mask;
|
||||
if (ring->tx_info[ring_index].ts_requested)
|
||||
|
||||
if (unlikely(ring->tx_info[ring_index].ts_requested))
|
||||
timestamp = mlx4_en_get_cqe_ts(cqe);
|
||||
|
||||
/* free next descriptor */
|
||||
|
@ -510,7 +510,7 @@ enum mlx5e_link_mode {
|
||||
MLX5E_100GBASE_KR4 = 22,
|
||||
MLX5E_100GBASE_LR4 = 23,
|
||||
MLX5E_100BASE_TX = 24,
|
||||
MLX5E_100BASE_T = 25,
|
||||
MLX5E_1000BASE_T = 25,
|
||||
MLX5E_10GBASE_T = 26,
|
||||
MLX5E_25GBASE_CR = 27,
|
||||
MLX5E_25GBASE_KR = 28,
|
||||
|
@ -138,10 +138,10 @@ static const struct {
|
||||
[MLX5E_100BASE_TX] = {
|
||||
.speed = 100,
|
||||
},
|
||||
[MLX5E_100BASE_T] = {
|
||||
.supported = SUPPORTED_100baseT_Full,
|
||||
.advertised = ADVERTISED_100baseT_Full,
|
||||
.speed = 100,
|
||||
[MLX5E_1000BASE_T] = {
|
||||
.supported = SUPPORTED_1000baseT_Full,
|
||||
.advertised = ADVERTISED_1000baseT_Full,
|
||||
.speed = 1000,
|
||||
},
|
||||
[MLX5E_10GBASE_T] = {
|
||||
.supported = SUPPORTED_10000baseT_Full,
|
||||
|
@ -1542,24 +1542,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
|
||||
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
int hw_mtu;
|
||||
u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
|
||||
int err;
|
||||
|
||||
err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
|
||||
err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
|
||||
/* Update vport context MTU */
|
||||
mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
|
||||
netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
|
||||
__func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
|
||||
static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
|
||||
{
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u16 hw_mtu = 0;
|
||||
int err;
|
||||
|
||||
netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
|
||||
err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
|
||||
if (err || !hw_mtu) /* fallback to port oper mtu */
|
||||
mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
|
||||
|
||||
*mtu = MLX5E_HW2SW_MTU(hw_mtu);
|
||||
}
|
||||
|
||||
static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
|
||||
{
|
||||
struct mlx5e_priv *priv = netdev_priv(netdev);
|
||||
u16 mtu;
|
||||
int err;
|
||||
|
||||
err = mlx5e_set_mtu(priv, netdev->mtu);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
mlx5e_query_mtu(priv, &mtu);
|
||||
if (mtu != netdev->mtu)
|
||||
netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
|
||||
__func__, mtu, netdev->mtu);
|
||||
|
||||
netdev->mtu = mtu;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2257,22 +2283,27 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}

#define MXL5_HW_MIN_MTU 64
#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)

static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
int max_mtu;
u16 max_mtu;
u16 min_mtu;
int err = 0;

mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

max_mtu = MLX5E_HW2SW_MTU(max_mtu);
min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);

if (new_mtu > max_mtu) {
if (new_mtu > max_mtu || new_mtu < min_mtu) {
netdev_err(netdev,
"%s: Bad MTU (%d) > (%d) Max\n",
__func__, new_mtu, max_mtu);
"%s: Bad MTU (%d), valid range is: [%d..%d]\n",
__func__, new_mtu, min_mtu, max_mtu);
return -EINVAL;
}

@ -2966,7 +2997,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
|
||||
schedule_work(&priv->set_rx_mode_work);
|
||||
mlx5e_disable_async_events(priv);
|
||||
flush_scheduled_work();
|
||||
unregister_netdev(netdev);
|
||||
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
|
||||
netif_device_detach(netdev);
|
||||
mutex_lock(&priv->state_lock);
|
||||
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
|
||||
mlx5e_close_locked(netdev);
|
||||
mutex_unlock(&priv->state_lock);
|
||||
} else {
|
||||
unregister_netdev(netdev);
|
||||
}
|
||||
|
||||
mlx5e_tc_cleanup(priv);
|
||||
mlx5e_vxlan_cleanup(priv);
|
||||
mlx5e_destroy_q_counter(priv);
|
||||
@ -2981,7 +3021,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
|
||||
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
|
||||
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
|
||||
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
|
||||
free_netdev(netdev);
|
||||
|
||||
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
|
||||
free_netdev(netdev);
|
||||
}
|
||||
|
||||
static void *mlx5e_get_netdev(void *vpriv)
|
||||
|
@ -1065,33 +1065,6 @@ unlock_fg:
|
||||
return rule;
|
||||
}
|
||||
|
||||
static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
|
||||
u8 match_criteria_enable,
|
||||
u32 *match_criteria,
|
||||
u32 *match_value,
|
||||
u8 action,
|
||||
u32 flow_tag,
|
||||
struct mlx5_flow_destination *dest)
|
||||
{
|
||||
struct mlx5_flow_rule *rule;
|
||||
struct mlx5_flow_group *g;
|
||||
|
||||
g = create_autogroup(ft, match_criteria_enable, match_criteria);
|
||||
if (IS_ERR(g))
|
||||
return (void *)g;
|
||||
|
||||
rule = add_rule_fg(g, match_value,
|
||||
action, flow_tag, dest);
|
||||
if (IS_ERR(rule)) {
|
||||
/* Remove assumes refcount > 0 and autogroup creates a group
|
||||
* with a refcount = 0.
|
||||
*/
|
||||
tree_get_node(&g->node);
|
||||
tree_remove_node(&g->node);
|
||||
}
|
||||
return rule;
|
||||
}
|
||||
|
||||
static struct mlx5_flow_rule *
|
||||
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
|
||||
u8 match_criteria_enable,
|
||||
@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
|
||||
match_value, action, flow_tag, dest);
|
||||
g = create_autogroup(ft, match_criteria_enable, match_criteria);
|
||||
if (IS_ERR(g)) {
|
||||
rule = (void *)g;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
rule = add_rule_fg(g, match_value,
|
||||
action, flow_tag, dest);
|
||||
if (IS_ERR(rule)) {
|
||||
/* Remove assumes refcount > 0 and autogroup creates a group
|
||||
* with a refcount = 0.
|
||||
*/
|
||||
unlock_ref_node(&ft->node);
|
||||
tree_get_node(&g->node);
|
||||
tree_remove_node(&g->node);
|
||||
return rule;
|
||||
}
|
||||
unlock:
|
||||
unlock_ref_node(&ft->node);
|
||||
return rule;
|
||||
@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
|
||||
{
|
||||
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
|
||||
int prio;
|
||||
static struct fs_prio *fs_prio;
|
||||
struct fs_prio *fs_prio;
|
||||
struct mlx5_flow_namespace *ns;
|
||||
|
||||
if (!root_ns)
|
||||
|
@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
||||
int err;
|
||||
|
||||
mutex_lock(&dev->intf_state_mutex);
|
||||
if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
|
||||
if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
|
||||
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
|
||||
__func__);
|
||||
goto out;
|
||||
@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
||||
if (err)
|
||||
pr_info("failed request module on %s\n", MLX5_IB_MOD);
|
||||
|
||||
dev->interface_state = MLX5_INTERFACE_STATE_UP;
|
||||
clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
|
||||
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
|
||||
out:
|
||||
mutex_unlock(&dev->intf_state_mutex);
|
||||
|
||||
@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
||||
}
|
||||
|
||||
mutex_lock(&dev->intf_state_mutex);
|
||||
if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
|
||||
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
|
||||
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
|
||||
__func__);
|
||||
goto out;
|
||||
@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
|
||||
mlx5_cmd_cleanup(dev);
|
||||
|
||||
out:
|
||||
dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
|
||||
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
|
||||
set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
|
||||
mutex_unlock(&dev->intf_state_mutex);
|
||||
return err;
|
||||
}
|
||||
@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
|
||||
.resume = mlx5_pci_resume
|
||||
};
|
||||
|
||||
static void shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
|
||||
struct mlx5_priv *priv = &dev->priv;
|
||||
|
||||
dev_info(&pdev->dev, "Shutdown was called\n");
|
||||
/* Notify mlx5 clients that the kernel is being shut down */
|
||||
set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
|
||||
mlx5_unload_one(dev, priv);
|
||||
mlx5_pci_disable_device(dev);
|
||||
}
|
||||
|
||||
static const struct pci_device_id mlx5_core_pci_table[] = {
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
|
||||
@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
|
||||
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
|
||||
.id_table = mlx5_core_pci_table,
|
||||
.probe = init_one,
|
||||
.remove = remove_one,
|
||||
.shutdown = shutdown,
|
||||
.err_handler = &mlx5_err_handler,
|
||||
.sriov_configure = mlx5_core_sriov_configure,
|
||||
};
|
||||
|
@ -260,8 +260,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
|
||||
|
||||
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
|
||||
int *max_mtu, int *oper_mtu, u8 port)
|
||||
static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
|
||||
u16 *max_mtu, u16 *oper_mtu, u8 port)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
|
||||
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
|
||||
@ -281,7 +281,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
|
||||
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
|
||||
}
|
||||
|
||||
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
|
||||
int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
|
||||
{
|
||||
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
|
||||
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
|
||||
@ -296,14 +296,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
|
||||
|
||||
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
|
||||
void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
|
||||
u8 port)
|
||||
{
|
||||
mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
|
||||
|
||||
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
|
||||
void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
|
||||
u8 port)
|
||||
{
|
||||
mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
|
||||
|
@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
|
||||
|
||||
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
|
||||
{
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
|
||||
u32 *out;
|
||||
int err;
|
||||
|
||||
out = mlx5_vzalloc(outlen);
|
||||
if (!out)
|
||||
return -ENOMEM;
|
||||
|
||||
err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
|
||||
if (!err)
|
||||
*mtu = MLX5_GET(query_nic_vport_context_out, out,
|
||||
nic_vport_context.mtu);
|
||||
|
||||
kvfree(out);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
|
||||
|
||||
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
|
||||
{
|
||||
int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
|
||||
void *in;
|
||||
int err;
|
||||
|
||||
in = mlx5_vzalloc(inlen);
|
||||
if (!in)
|
||||
return -ENOMEM;
|
||||
|
||||
MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
|
||||
MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
|
||||
|
||||
err = mlx5_modify_nic_vport_context(mdev, in, inlen);
|
||||
|
||||
kvfree(in);
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
|
||||
|
||||
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
|
||||
u32 vport,
|
||||
enum mlx5_list_type list_type,
|
||||
|
@ -37,8 +37,8 @@
|
||||
|
||||
#define _QLCNIC_LINUX_MAJOR 5
|
||||
#define _QLCNIC_LINUX_MINOR 3
|
||||
#define _QLCNIC_LINUX_SUBVERSION 63
|
||||
#define QLCNIC_LINUX_VERSIONID "5.3.63"
|
||||
#define _QLCNIC_LINUX_SUBVERSION 64
|
||||
#define QLCNIC_LINUX_VERSIONID "5.3.64"
|
||||
#define QLCNIC_DRV_IDC_VER 0x01
|
||||
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
|
||||
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
|
||||
|
@ -49,7 +49,6 @@ struct socfpga_dwmac {
|
||||
u32 reg_shift;
|
||||
struct device *dev;
|
||||
struct regmap *sys_mgr_base_addr;
|
||||
struct reset_control *stmmac_rst;
|
||||
void __iomem *splitter_base;
|
||||
bool f2h_ptp_ref_clk;
|
||||
};
|
||||
@ -92,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
|
||||
struct device_node *np_splitter;
|
||||
struct resource res_splitter;
|
||||
|
||||
dwmac->stmmac_rst = devm_reset_control_get(dev,
|
||||
STMMAC_RESOURCE_NAME);
|
||||
if (IS_ERR(dwmac->stmmac_rst)) {
|
||||
dev_info(dev, "Could not get reset control!\n");
|
||||
if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
dwmac->stmmac_rst = NULL;
|
||||
}
|
||||
|
||||
dwmac->interface = of_get_phy_mode(np);
|
||||
|
||||
sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
|
||||
@ -194,30 +184,23 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
|
||||
{
|
||||
struct socfpga_dwmac *dwmac = priv;
|
||||
|
||||
/* On socfpga platform exit, assert and hold reset to the
|
||||
* enet controller - the default state after a hard reset.
|
||||
*/
|
||||
if (dwmac->stmmac_rst)
|
||||
reset_control_assert(dwmac->stmmac_rst);
|
||||
}
|
||||
|
||||
static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
|
||||
{
|
||||
struct socfpga_dwmac *dwmac = priv;
|
||||
struct socfpga_dwmac *dwmac = priv;
|
||||
struct net_device *ndev = platform_get_drvdata(pdev);
|
||||
struct stmmac_priv *stpriv = NULL;
|
||||
int ret = 0;
|
||||
|
||||
if (ndev)
|
||||
stpriv = netdev_priv(ndev);
|
||||
if (!ndev)
|
||||
return -EINVAL;
|
||||
|
||||
stpriv = netdev_priv(ndev);
|
||||
if (!stpriv)
|
||||
return -EINVAL;
|
||||
|
||||
/* Assert reset to the enet controller before changing the phy mode */
|
||||
if (dwmac->stmmac_rst)
|
||||
reset_control_assert(dwmac->stmmac_rst);
|
||||
if (stpriv->stmmac_rst)
|
||||
reset_control_assert(stpriv->stmmac_rst);
|
||||
|
||||
/* Setup the phy mode in the system manager registers according to
|
||||
* devicetree configuration
|
||||
@ -227,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
|
||||
/* Deassert reset for the phy configuration to be sampled by
|
||||
* the enet controller, and operation to start in requested mode
|
||||
*/
|
||||
if (dwmac->stmmac_rst)
|
||||
reset_control_deassert(dwmac->stmmac_rst);
|
||||
if (stpriv->stmmac_rst)
|
||||
reset_control_deassert(stpriv->stmmac_rst);
|
||||
|
||||
/* Before the enet controller is suspended, the phy is suspended.
|
||||
* This causes the phy clock to be gated. The enet controller is
|
||||
@ -245,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
|
||||
* control register 0, and can be modified by the phy driver
|
||||
* framework.
|
||||
*/
|
||||
if (stpriv && stpriv->phydev)
|
||||
if (stpriv->phydev)
|
||||
phy_resume(stpriv->phydev);
|
||||
|
||||
return ret;
|
||||
@ -279,14 +262,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
|
||||
|
||||
plat_dat->bsp_priv = dwmac;
|
||||
plat_dat->init = socfpga_dwmac_init;
|
||||
plat_dat->exit = socfpga_dwmac_exit;
|
||||
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
|
||||
|
||||
ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
|
||||
if (!ret)
|
||||
ret = socfpga_dwmac_init(pdev, dwmac);
|
||||
|
||||
return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct of_device_id socfpga_dwmac_match[] = {
|
||||
|
@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
|
||||
macsec_skb_cb(skb)->valid = false;
|
||||
skb = skb_share_check(skb, GFP_ATOMIC);
|
||||
if (!skb)
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
|
||||
if (!req) {
|
||||
kfree_skb(skb);
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
hdr = (struct macsec_eth_header *)skb->data;
|
||||
@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
|
||||
skb = skb_unshare(skb, GFP_ATOMIC);
|
||||
if (!skb) {
|
||||
aead_request_free(req);
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
} else {
|
||||
/* integrity only: all headers + data authenticated */
|
||||
@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
|
||||
dev_hold(dev);
|
||||
ret = crypto_aead_decrypt(req);
|
||||
if (ret == -EINPROGRESS) {
|
||||
return NULL;
|
||||
return ERR_PTR(ret);
|
||||
} else if (ret != 0) {
|
||||
/* decryption/authentication failed
|
||||
* 10.6 if validateFrames is disabled, deliver anyway
|
||||
*/
|
||||
if (ret != -EBADMSG) {
|
||||
kfree_skb(skb);
|
||||
skb = NULL;
|
||||
skb = ERR_PTR(ret);
|
||||
}
|
||||
} else {
|
||||
macsec_skb_cb(skb)->valid = true;
|
||||
@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
|
||||
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
|
||||
skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
|
||||
|
||||
if (!skb) {
|
||||
macsec_rxsa_put(rx_sa);
|
||||
if (IS_ERR(skb)) {
|
||||
/* the decrypt callback needs the reference */
|
||||
if (PTR_ERR(skb) != -EINPROGRESS)
|
||||
macsec_rxsa_put(rx_sa);
|
||||
rcu_read_unlock();
|
||||
*pskb = NULL;
|
||||
return RX_HANDLER_CONSUMED;
|
||||
@ -1161,7 +1163,8 @@ deliver:
|
||||
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
|
||||
macsec_reset_skb(skb, secy->netdev);
|
||||
|
||||
macsec_rxsa_put(rx_sa);
|
||||
if (rx_sa)
|
||||
macsec_rxsa_put(rx_sa);
|
||||
count_rx(dev, skb->len);
|
||||
|
||||
rcu_read_unlock();
|
||||
@ -1623,8 +1626,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
|
||||
}
|
||||
|
||||
rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
|
||||
if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
|
||||
secy->icv_len)) {
|
||||
if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
|
||||
secy->key_len, secy->icv_len)) {
|
||||
kfree(rx_sa);
|
||||
rtnl_unlock();
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -1769,6 +1773,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
|
||||
tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
|
||||
if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
|
||||
secy->key_len, secy->icv_len)) {
|
||||
kfree(tx_sa);
|
||||
rtnl_unlock();
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -2274,7 +2279,7 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
|
||||
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
|
||||
MACSEC_SECY_ATTR_PAD) ||
|
||||
nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
|
||||
DEFAULT_CIPHER_ID,
|
||||
MACSEC_DEFAULT_CIPHER_ID,
|
||||
MACSEC_SECY_ATTR_PAD) ||
|
||||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
|
||||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
|
||||
@ -2316,7 +2321,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
|
||||
if (!hdr)
|
||||
return -EMSGSIZE;
|
||||
|
||||
rtnl_lock();
|
||||
genl_dump_check_consistent(cb, hdr, &macsec_fam);
|
||||
|
||||
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
|
||||
goto nla_put_failure;
|
||||
@ -2482,18 +2487,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
|
||||
|
||||
nla_nest_end(skb, rxsc_list);
|
||||
|
||||
rtnl_unlock();
|
||||
|
||||
genlmsg_end(skb, hdr);
|
||||
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
rtnl_unlock();
|
||||
genlmsg_cancel(skb, hdr);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
static int macsec_generation = 1; /* protected by RTNL */
|
||||
|
||||
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
{
|
||||
struct net *net = sock_net(skb->sk);
|
||||
@ -2503,6 +2507,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
|
||||
dev_idx = cb->args[0];
|
||||
|
||||
d = 0;
|
||||
rtnl_lock();
|
||||
|
||||
cb->seq = macsec_generation;
|
||||
|
||||
for_each_netdev(net, dev) {
|
||||
struct macsec_secy *secy;
|
||||
|
||||
@ -2520,6 +2528,7 @@ next:
|
||||
}
|
||||
|
||||
done:
|
||||
rtnl_unlock();
|
||||
cb->args[0] = d;
|
||||
return skb->len;
|
||||
}
|
||||
@ -2973,10 +2982,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
|
||||
struct net_device *real_dev = macsec->real_dev;
|
||||
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
|
||||
|
||||
macsec_generation++;
|
||||
|
||||
unregister_netdevice_queue(dev, head);
|
||||
list_del_rcu(&macsec->secys);
|
||||
if (list_empty(&rxd->secys))
|
||||
if (list_empty(&rxd->secys)) {
|
||||
netdev_rx_handler_unregister(real_dev);
|
||||
kfree(rxd);
|
||||
}
|
||||
|
||||
macsec_del_dev(macsec);
|
||||
}
|
||||
@ -2998,8 +3011,10 @@ static int register_macsec_dev(struct net_device *real_dev,
|
||||
|
||||
err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
|
||||
rxd);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
kfree(rxd);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
list_add_tail_rcu(&macsec->secys, &rxd->secys);
|
||||
@ -3119,6 +3134,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
|
||||
if (err < 0)
|
||||
goto del_dev;
|
||||
|
||||
macsec_generation++;
|
||||
|
||||
dev_hold(real_dev);
|
||||
|
||||
return 0;
|
||||
@ -3132,7 +3149,7 @@ unregister:
|
||||
|
||||
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
|
||||
{
|
||||
u64 csid = DEFAULT_CIPHER_ID;
|
||||
u64 csid = MACSEC_DEFAULT_CIPHER_ID;
|
||||
u8 icv_len = DEFAULT_ICV_LEN;
|
||||
int flag;
|
||||
bool es, scb, sci;
|
||||
@ -3147,8 +3164,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
|
||||
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
|
||||
|
||||
switch (csid) {
|
||||
case DEFAULT_CIPHER_ID:
|
||||
case DEFAULT_CIPHER_ALT:
|
||||
case MACSEC_DEFAULT_CIPHER_ID:
|
||||
case MACSEC_DEFAULT_CIPHER_ALT:
|
||||
if (icv_len < MACSEC_MIN_ICV_LEN ||
|
||||
icv_len > MACSEC_MAX_ICV_LEN)
|
||||
return -EINVAL;
|
||||
@ -3182,8 +3199,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
|
||||
nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
if ((data[IFLA_MACSEC_PROTECT] &&
|
||||
nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
|
||||
if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
|
||||
nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
|
||||
!data[IFLA_MACSEC_WINDOW])
|
||||
return -EINVAL;
|
||||
|
||||
@ -3223,7 +3240,7 @@ static int macsec_fill_info(struct sk_buff *skb,
|
||||
IFLA_MACSEC_PAD) ||
|
||||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
|
||||
nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
|
||||
DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
|
||||
MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
|
||||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
|
||||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
|
||||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
|
||||
|
@ -737,8 +737,19 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
|
||||
break;
|
||||
case CPU_PM_EXIT:
|
||||
case CPU_PM_ENTER_FAILED:
|
||||
/* Restore and enable the counter */
|
||||
armpmu_start(event, PERF_EF_RELOAD);
|
||||
/*
|
||||
* Restore and enable the counter.
|
||||
* armpmu_start() indirectly calls
|
||||
*
|
||||
* perf_event_update_userpage()
|
||||
*
|
||||
* that requires RCU read locking to be functional,
|
||||
* wrap the call within RCU_NONIDLE to make the
|
||||
* RCU subsystem aware this cpu is not idle from
|
||||
* an RCU perspective for the armpmu_start() call
|
||||
* duration.
|
||||
*/
|
||||
RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
@ -86,6 +86,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
if (!np)
return -ENODEV;

if (!dev->parent || !dev->parent->of_node)
return -ENODEV;

dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
if (IS_ERR(dp))
return -ENOMEM;
@ -104,9 +107,9 @@ static int rockchip_dp_phy_probe(struct platform_device *pdev)
return ret;
}

dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
dp->grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(dp->grf)) {
dev_err(dev, "rk3288-dp needs rockchip,grf property\n");
dev_err(dev, "rk3288-dp needs the General Register Files syscon\n");
return PTR_ERR(dp->grf);
}

@ -176,7 +176,10 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
struct regmap *grf;
unsigned int reg_offset;

grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf");
if (!dev->parent || !dev->parent->of_node)
return -ENODEV;

grf = syscon_node_to_regmap(dev->parent->of_node);
if (IS_ERR(grf)) {
dev_err(dev, "Missing rockchip,grf property\n");
return PTR_ERR(grf);

@ -2,6 +2,7 @@ config PINCTRL_IMX
|
||||
bool
|
||||
select PINMUX
|
||||
select PINCONF
|
||||
select REGMAP
|
||||
|
||||
config PINCTRL_IMX1_CORE
|
||||
bool
|
||||
|
@ -1004,7 +1004,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
struct mtk_pinctrl *pctl = dev_get_drvdata(chip->parent);
int eint_num, virq, eint_offset;
unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
128000, 256000};
const struct mtk_desc_pin *pin;
struct irq_data *d;

@ -1022,9 +1023,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_can_en_debounce(pctl, eint_num))
return -ENOSYS;

dbnc = ARRAY_SIZE(dbnc_arr);
for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
if (debounce <= dbnc_arr[i]) {
dbnc = ARRAY_SIZE(debounce_time);
for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
if (debounce <= debounce_time[i]) {
dbnc = i;
break;
}

@ -1280,9 +1280,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,

/* Parse pins in each row from LSB */
while (mask) {
bit_pos = ffs(mask);
bit_pos = __ffs(mask);
pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
mask_pos = ((pcs->fmask) << (bit_pos - 1));
mask_pos = ((pcs->fmask) << bit_pos);
val_pos = val & mask_pos;
submask = mask & mask_pos;

@ -1852,7 +1852,7 @@ static int pcs_probe(struct platform_device *pdev)
ret = of_property_read_u32(np, "pinctrl-single,function-mask",
&pcs->fmask);
if (!ret) {
pcs->fshift = ffs(pcs->fmask) - 1;
pcs->fshift = __ffs(pcs->fmask);
pcs->fmax = pcs->fmask >> pcs->fshift;
} else {
/* If mask property doesn't exist, function mux is invalid. */

@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
|
||||
{
|
||||
struct flowi6 fl;
|
||||
|
||||
memset(&fl, 0, sizeof(fl));
|
||||
if (saddr)
|
||||
memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
|
||||
if (daddr)
|
||||
|
@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
|
||||
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
|
||||
|
||||
/*
|
||||
* With CONFIG_PM disabled turn on all domains to make the
|
||||
* hardware usable.
|
||||
* Initially turn on all domains to make the domains usable
|
||||
* with !CONFIG_PM and to get the hardware in sync with the
|
||||
* software. The unused domains will be switched off during
|
||||
* late_init time.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_PM))
|
||||
genpd->power_on(genpd);
|
||||
genpd->power_on(genpd);
|
||||
|
||||
pm_genpd_init(genpd, NULL, true);
|
||||
pm_genpd_init(genpd, NULL, false);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -376,6 +376,8 @@ config MTK_THERMAL
|
||||
tristate "Temperature sensor driver for mediatek SoCs"
|
||||
depends on ARCH_MEDIATEK || COMPILE_TEST
|
||||
depends on HAS_IOMEM
|
||||
depends on NVMEM || NVMEM=n
|
||||
depends on RESET_CONTROLLER
|
||||
default y
|
||||
help
|
||||
Enable this option if you want to have support for thermal management
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include <linux/thermal.h>
|
||||
#include <linux/reset.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/nvmem-consumer.h>
|
||||
|
||||
/* AUXADC Registers */
|
||||
#define AUXADC_CON0_V 0x000
|
||||
@ -619,7 +618,7 @@ static struct platform_driver mtk_thermal_driver = {
|
||||
|
||||
module_platform_driver(mtk_thermal_driver);
|
||||
|
||||
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
|
||||
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
|
||||
MODULE_AUTHOR("Hanyi Wu <hanyi.wu@mediatek.com>");
|
||||
MODULE_DESCRIPTION("Mediatek thermal driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
|
@ -803,8 +803,8 @@ static int thermal_of_populate_trip(struct device_node *np,
|
||||
* otherwise, it returns a corresponding ERR_PTR(). Caller must
|
||||
* check the return value with help of IS_ERR() helper.
|
||||
*/
|
||||
static struct __thermal_zone *
|
||||
thermal_of_build_thermal_zone(struct device_node *np)
|
||||
static struct __thermal_zone
|
||||
__init *thermal_of_build_thermal_zone(struct device_node *np)
|
||||
{
|
||||
struct device_node *child = NULL, *gchild;
|
||||
struct __thermal_zone *tz;
|
||||
|
@ -301,7 +301,7 @@ static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
capped_extra_power = 0;
extra_power = 0;
for (i = 0; i < num_actors; i++) {
u64 req_range = req_power[i] * power_range;
u64 req_range = (u64)req_power[i] * power_range;

granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);

@ -688,7 +688,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int trip, ret;
unsigned long temperature;
int temperature;

if (!tz->ops->set_trip_temp)
return -EPERM;
@ -696,7 +696,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
if (!sscanf(attr->attr.name, "trip_point_%d_temp", &trip))
return -EINVAL;

if (kstrtoul(buf, 10, &temperature))
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;

ret = tz->ops->set_trip_temp(tz, trip, temperature);
@ -899,9 +899,9 @@ emul_temp_store(struct device *dev, struct device_attribute *attr,
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
int ret = 0;
unsigned long temperature;
int temperature;

if (kstrtoul(buf, 10, &temperature))
if (kstrtoint(buf, 10, &temperature))
return -EINVAL;

if (!tz->ops->set_emul_temp) {

@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
|
||||
*/
|
||||
|
||||
static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
|
||||
struct inode *ptm_inode, int idx)
|
||||
struct file *file, int idx)
|
||||
{
|
||||
/* Master must be open via /dev/ptmx */
|
||||
return ERR_PTR(-EIO);
|
||||
@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
|
||||
*/
|
||||
|
||||
static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
|
||||
struct inode *pts_inode, int idx)
|
||||
struct file *file, int idx)
|
||||
{
|
||||
struct tty_struct *tty;
|
||||
|
||||
mutex_lock(&devpts_mutex);
|
||||
tty = devpts_get_priv(pts_inode);
|
||||
tty = devpts_get_priv(file->f_path.dentry);
|
||||
mutex_unlock(&devpts_mutex);
|
||||
/* Master must be open before slave */
|
||||
if (!tty)
|
||||
@ -722,7 +722,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct pts_fs_info *fsi;
|
||||
struct tty_struct *tty;
|
||||
struct inode *slave_inode;
|
||||
struct dentry *dentry;
|
||||
int retval;
|
||||
int index;
|
||||
|
||||
@ -769,14 +769,12 @@ static int ptmx_open(struct inode *inode, struct file *filp)
|
||||
|
||||
tty_add_file(tty, filp);
|
||||
|
||||
slave_inode = devpts_pty_new(fsi,
|
||||
MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
|
||||
tty->link);
|
||||
if (IS_ERR(slave_inode)) {
|
||||
retval = PTR_ERR(slave_inode);
|
||||
dentry = devpts_pty_new(fsi, index, tty->link);
|
||||
if (IS_ERR(dentry)) {
|
||||
retval = PTR_ERR(dentry);
|
||||
goto err_release;
|
||||
}
|
||||
tty->link->driver_data = slave_inode;
|
||||
tty->link->driver_data = dentry;
|
||||
|
||||
retval = ptm_driver->ops->open(tty, filp);
|
||||
if (retval)
|
||||
|
@ -1403,9 +1403,18 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
|
||||
/*
|
||||
* Empty the RX FIFO, we are not interested in anything
|
||||
* received during the half-duplex transmission.
|
||||
* Enable previously disabled RX interrupts.
|
||||
*/
|
||||
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
|
||||
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
|
||||
serial8250_clear_fifos(p);
|
||||
|
||||
serial8250_rpm_get(p);
|
||||
|
||||
p->ier |= UART_IER_RLSI | UART_IER_RDI;
|
||||
serial_port_out(&p->port, UART_IER, p->ier);
|
||||
|
||||
serial8250_rpm_put(p);
|
||||
}
|
||||
}
|
||||
|
||||
static void serial8250_em485_handle_stop_tx(unsigned long arg)
|
||||
|
@ -324,7 +324,6 @@ config SERIAL_8250_EM
|
||||
config SERIAL_8250_RT288X
|
||||
bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support"
|
||||
depends on SERIAL_8250
|
||||
depends on MIPS || COMPILE_TEST
|
||||
default y if MIPS_ALCHEMY || SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620
|
||||
help
|
||||
Selecting this option will add support for the alternate register
|
||||
|
@@ -72,7 +72,7 @@ static void uartlite_outbe32(u32 val, void __iomem *addr)
 	iowrite32be(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_be = {
+static struct uartlite_reg_ops uartlite_be = {
 	.in = uartlite_inbe32,
 	.out = uartlite_outbe32,
 };
@@ -87,21 +87,21 @@ static void uartlite_outle32(u32 val, void __iomem *addr)
 	iowrite32(val, addr);
 }
 
-static const struct uartlite_reg_ops uartlite_le = {
+static struct uartlite_reg_ops uartlite_le = {
 	.in = uartlite_inle32,
 	.out = uartlite_outle32,
 };
 
 static inline u32 uart_in32(u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	return reg_ops->in(port->membase + offset);
 }
 
 static inline void uart_out32(u32 val, u32 offset, struct uart_port *port)
 {
-	const struct uartlite_reg_ops *reg_ops = port->private_data;
+	struct uartlite_reg_ops *reg_ops = port->private_data;
 
 	reg_ops->out(val, port->membase + offset);
 }
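The uartlite hunks only drop const from the uartlite_reg_ops tables; the underlying pattern is a per-port table of register accessors (big- or little-endian) selected once and then used by uart_in32()/uart_out32(). A compact sketch of that indirection with invented demo_* names and plain memory instead of MMIO:

#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

struct demo_reg_ops {
	uint32_t (*in)(const uint32_t *addr);
	void     (*out)(uint32_t val, uint32_t *addr);
};

static uint32_t in_native(const uint32_t *addr)           { return *addr; }
static void     out_native(uint32_t val, uint32_t *addr)  { *addr = val; }
static uint32_t in_swapped(const uint32_t *addr)          { return bswap32(*addr); }
static void     out_swapped(uint32_t val, uint32_t *addr) { *addr = bswap32(val); }

/* Two accessor tables; the driver picks one per port (LE vs BE in uartlite). */
static struct demo_reg_ops demo_native  = { .in = in_native,  .out = out_native  };
static struct demo_reg_ops demo_swapped = { .in = in_swapped, .out = out_swapped };

struct demo_port {
	uint32_t regs[4];                /* stands in for port->membase       */
	struct demo_reg_ops *reg_ops;    /* stands in for port->private_data  */
};

static uint32_t demo_in32(struct demo_port *p, unsigned int off)
{
	return p->reg_ops->in(&p->regs[off]);
}

static void demo_out32(struct demo_port *p, unsigned int off, uint32_t val)
{
	p->reg_ops->out(val, &p->regs[off]);
}

int main(void)
{
	struct demo_port p = { .reg_ops = &demo_swapped };  /* chosen once at probe time */
	struct demo_port q = { .reg_ops = &demo_native };

	demo_out32(&p, 0, 0x11223344);
	demo_out32(&q, 0, 0x11223344);
	printf("swapped stored=0x%08x read back=0x%08x, native stored=0x%08x\n",
	       (unsigned int)p.regs[0], (unsigned int)demo_in32(&p, 0),
	       (unsigned int)q.regs[0]);
	return 0;
}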
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
  *	Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
  */
 static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
-		struct inode *inode, int idx)
+		struct file *file, int idx)
 {
 	struct tty_struct *tty;
 
 	if (driver->ops->lookup)
-		tty = driver->ops->lookup(driver, inode, idx);
+		tty = driver->ops->lookup(driver, file, idx);
 	else
 		tty = driver->ttys[idx];
 
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
 	}
 
 	/* check whether we're reopening an existing tty */
-	tty = tty_driver_lookup_tty(driver, inode, index);
+	tty = tty_driver_lookup_tty(driver, filp, index);
 	if (IS_ERR(tty)) {
 		mutex_unlock(&tty_mutex);
 		goto out;
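tty_driver_lookup_tty() keeps its shape: if the driver provides a lookup hook it is called (now with the struct file instead of the inode), otherwise the core falls back to its own ttys[] table. The optional-callback-with-default pattern, sketched stand-alone with invented demo_* types:

#include <stdio.h>
#include <stddef.h>

struct demo_file { int id; };

struct demo_driver {
	/* Optional hook; NULL means "use the default table". */
	const char *(*lookup)(struct demo_driver *drv, struct demo_file *file, int idx);
	const char *ttys[4];
};

static const char *demo_lookup_tty(struct demo_driver *drv,
				   struct demo_file *file, int idx)
{
	if (drv->lookup)
		return drv->lookup(drv, file, idx);
	return drv->ttys[idx];
}

static const char *custom_lookup(struct demo_driver *drv,
				 struct demo_file *file, int idx)
{
	(void)drv; (void)idx;
	return file->id ? "from-hook" : NULL;
}

int main(void)
{
	struct demo_file f = { .id = 1 };
	struct demo_driver plain  = { .lookup = NULL, .ttys = { "ttyS0", "ttyS1" } };
	struct demo_driver hooked = { .lookup = custom_lookup };

	printf("%s / %s\n", demo_lookup_tty(&plain, &f, 1),
	       demo_lookup_tty(&hooked, &f, 0));
	return 0;
}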
@@ -1150,6 +1150,11 @@ static int dwc3_suspend(struct device *dev)
 	phy_exit(dwc->usb2_generic_phy);
 	phy_exit(dwc->usb3_generic_phy);
 
+	usb_phy_set_suspend(dwc->usb2_phy, 1);
+	usb_phy_set_suspend(dwc->usb3_phy, 1);
+	WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
+	WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+
 	pinctrl_pm_select_sleep_state(dev);
 
 	return 0;
@@ -1163,11 +1168,21 @@ static int dwc3_resume(struct device *dev)
 
 	pinctrl_pm_select_default_state(dev);
 
+	usb_phy_set_suspend(dwc->usb2_phy, 0);
+	usb_phy_set_suspend(dwc->usb3_phy, 0);
+	ret = phy_power_on(dwc->usb2_generic_phy);
+	if (ret < 0)
+		return ret;
+
+	ret = phy_power_on(dwc->usb3_generic_phy);
+	if (ret < 0)
+		goto err_usb2phy_power;
+
 	usb_phy_init(dwc->usb3_phy);
 	usb_phy_init(dwc->usb2_phy);
 	ret = phy_init(dwc->usb2_generic_phy);
 	if (ret < 0)
-		return ret;
+		goto err_usb3phy_power;
 
 	ret = phy_init(dwc->usb3_generic_phy);
 	if (ret < 0)
@@ -1200,6 +1215,12 @@ static int dwc3_resume(struct device *dev)
 err_usb2phy_init:
 	phy_exit(dwc->usb2_generic_phy);
 
+err_usb3phy_power:
+	phy_power_off(dwc->usb3_generic_phy);
+
+err_usb2phy_power:
+	phy_power_off(dwc->usb2_generic_phy);
+
 	return ret;
 }
 
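The dwc3_resume() hunk inserts two phy_power_on() steps ahead of phy_init() and grows the error ladder so that each failure point unwinds exactly the steps that already succeeded, in reverse order. A small self-contained illustration of that goto-unwind pattern (generic step names, not the dwc3 API):

#include <stdio.h>

/* Pretend init steps; a negative return means failure. */
static int  step_a_on(void)  { puts("A on");  return 0; }
static void step_a_off(void) { puts("A off"); }
static int  step_b_on(void)  { puts("B on");  return 0; }
static void step_b_off(void) { puts("B off"); }
static int  step_c_on(void)  { puts("C on");  return -1; /* simulate failure */ }

static int demo_resume(void)
{
	int ret;

	ret = step_a_on();
	if (ret < 0)
		return ret;

	ret = step_b_on();
	if (ret < 0)
		goto err_a;            /* only A succeeded so far */

	ret = step_c_on();
	if (ret < 0)
		goto err_b;            /* A and B succeeded, undo both */

	return 0;

err_b:
	step_b_off();
err_a:
	step_a_off();
	return ret;
}

int main(void)
{
	printf("demo_resume() = %d\n", demo_resume());
	return 0;
}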
@@ -645,7 +645,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 	file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
 	if (!file) {
 		ret = -ENOMEM;
-		goto err1;
+		goto err2;
 	}
 
 	if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
@@ -653,7 +653,7 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 				dwc, &dwc3_mode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
@@ -663,19 +663,22 @@ int dwc3_debugfs_init(struct dwc3 *dwc)
 				dwc, &dwc3_testmode_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 
 		file = debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root,
 				dwc, &dwc3_link_state_fops);
 		if (!file) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err2;
 		}
 	}
 
 	return 0;
 
+err2:
+	kfree(dwc->regset);
+
 err1:
 	debugfs_remove_recursive(root);
 
@@ -686,5 +689,5 @@ err0:
 void dwc3_debugfs_exit(struct dwc3 *dwc)
 {
 	debugfs_remove_recursive(dwc->root);
-	dwc->root = NULL;
+	kfree(dwc->regset);
 }
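The debugfs hunks route every file-creation failure through a new err2 label that frees dwc->regset, and dwc3_debugfs_exit() now frees it as well, so the allocation is released on both the error and the teardown path. A stand-alone sketch of that ownership rule (demo_* names are invented):

#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	int *regset;   /* allocated before the debugfs-like files are created */
};

/* simulate_fail lets the caller exercise the error path. */
static int demo_debugfs_init(struct demo_ctx *ctx, int simulate_fail)
{
	ctx->regset = malloc(16 * sizeof(*ctx->regset));
	if (!ctx->regset)
		return -1;

	if (simulate_fail)
		goto err_free;          /* file creation failed: free what we allocated */

	return 0;

err_free:
	free(ctx->regset);
	ctx->regset = NULL;
	return -1;
}

static void demo_debugfs_exit(struct demo_ctx *ctx)
{
	free(ctx->regset);              /* mirrors the added kfree on the exit path */
	ctx->regset = NULL;
}

int main(void)
{
	struct demo_ctx ctx = { 0 };

	if (demo_debugfs_init(&ctx, 0) == 0) {
		puts("init ok");
		demo_debugfs_exit(&ctx);
	}
	return 0;
}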
@@ -496,7 +496,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0) {
 		dev_err(dev, "get_sync failed with err %d\n", ret);
-		goto err0;
+		goto err1;
 	}
 
 	dwc3_omap_map_offset(omap);
@@ -516,28 +516,24 @@ static int dwc3_omap_probe(struct platform_device *pdev)
 
 	ret = dwc3_omap_extcon_register(omap);
 	if (ret < 0)
-		goto err2;
+		goto err1;
 
 	ret = of_platform_populate(node, NULL, NULL, dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to create dwc3 core\n");
-		goto err3;
+		goto err2;
 	}
 
 	dwc3_omap_enable_irqs(omap);
 
 	return 0;
 
-err3:
+err2:
 	extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
 	extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
 
-err2:
-	dwc3_omap_disable_irqs(omap);
-
 err1:
 	pm_runtime_put_sync(dev);
 
-err0:
 	pm_runtime_disable(dev);
 
 	return ret;
@@ -2936,6 +2936,9 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+	if (!dwc->gadget_driver)
+		return 0;
+
 	if (dwc->pullups_connected) {
 		dwc3_gadget_disable_irq(dwc);
 		dwc3_gadget_run_stop(dwc, true, true);
@@ -2954,6 +2957,9 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
 	struct dwc3_ep *dep;
 	int ret;
 
+	if (!dwc->gadget_driver)
+		return 0;
+
 	/* Start with SuperSpeed Default */
 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
Some files were not shown because too many files have changed in this diff.