diff --git a/CREDITS b/CREDITS index 837367624e45..d7ebdfbc4d4f 100644 --- a/CREDITS +++ b/CREDITS @@ -9,7 +9,7 @@ Linus ---------- -M: Matt Mackal +N: Matt Mackal E: mpm@selenic.com D: SLOB slab allocator @@ -1910,7 +1910,7 @@ S: Ra'annana, Israel N: Andi Kleen E: andi@firstfloor.org -U: http://www.halobates.de +W: http://www.halobates.de D: network, x86, NUMA, various hacks S: Schwalbenstr. 96 S: 85551 Ottobrunn @@ -2089,8 +2089,8 @@ D: ST Microelectronics SPEAr13xx PCI host bridge driver D: Synopsys Designware PCI host bridge driver N: Gabor Kuti -M: seasons@falcon.sch.bme.hu -M: seasons@makosteszta.sote.hu +E: seasons@falcon.sch.bme.hu +E: seasons@makosteszta.sote.hu D: Original author of software suspend N: Jaroslav Kysela diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt similarity index 85% rename from Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt rename to Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt index fbbacd958240..6f28969af9dc 100644 --- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt +++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt @@ -6,7 +6,7 @@ perform in-band IPMI communication with their host. Required properties: -- compatible : should be "aspeed,ast2400-bt-bmc" +- compatible : should be "aspeed,ast2400-ibt-bmc" - reg: physical address and size of the registers Optional properties: @@ -17,7 +17,7 @@ Optional properties: Example: ibt@1e789140 { - compatible = "aspeed,ast2400-bt-bmc"; + compatible = "aspeed,ast2400-ibt-bmc"; reg = <0x1e789140 0x18>; interrupts = <8>; }; diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt index e1d76812419c..05150957ecfd 100644 --- a/Documentation/devicetree/bindings/net/ethernet.txt +++ b/Documentation/devicetree/bindings/net/ethernet.txt @@ -9,10 +9,26 @@ The following properties are common to the Ethernet controllers: - max-speed: number, specifies maximum speed in Mbit/s supported by the device; - max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than the maximum frame size (there's contradiction in ePAPR). -- phy-mode: string, operation mode of the PHY interface; supported values are - "mii", "gmii", "sgmii", "qsgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id", - "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii", "trgmii"; this is now a - de-facto standard property; +- phy-mode: string, operation mode of the PHY interface. 
This is now a de-facto + standard property; supported values are: + * "mii" + * "gmii" + * "sgmii" + * "qsgmii" + * "tbi" + * "rev-mii" + * "rmii" + * "rgmii" (RX and TX delays are added by the MAC when required) + * "rgmii-id" (RGMII with internal RX and TX delays provided by the PHY, the + MAC should not add the RX or TX delays in this case) + * "rgmii-rxid" (RGMII with internal RX delay provided by the PHY, the MAC + should not add an RX delay in this case) + * "rgmii-txid" (RGMII with internal TX delay provided by the PHY, the MAC + should not add an TX delay in this case) + * "rtbi" + * "smii" + * "xgmii" + * "trgmii" - phy-connection-type: the same as "phy-mode" property but described in ePAPR; - phy-handle: phandle, specifies a reference to a node representing a PHY device; this property is described in ePAPR and so preferred; diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt index fd40c852d7c7..462b04e8209f 100644 --- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt +++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt @@ -12,7 +12,7 @@ Required properties: Optional properties: - ti,dmic: phandle for the OMAP dmic node if the machine have it connected -- ti,jack_detection: Need to be present if the board capable to detect jack +- ti,jack-detection: Need to be present if the board capable to detect jack insertion, removal. Available audio endpoints for the audio-routing table: diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology index e0aefeece551..1a014fede0b7 100644 --- a/Documentation/i2c/i2c-topology +++ b/Documentation/i2c/i2c-topology @@ -326,7 +326,7 @@ Two parent-locked sibling muxes This is a good topology. - .--------. + .--------. .----------. .--| dev D1 | | parent- |--' '--------' .--| locked | .--------. @@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes This is a good topology. - .--------. + .--------. .----------. .--| dev D1 | | mux- |--' '--------' .--| locked | .--------. diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 6d6c07cf1a9a..63912ef34606 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and Switch tagging protocols ------------------------ -DSA currently supports 4 different tagging protocols, and a tag-less mode as +DSA currently supports 5 different tagging protocols, and a tag-less mode as well. The different protocols are implemented in: net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) net/dsa/tag_dsa.c: Marvell's original DSA tag net/dsa/tag_edsa.c: Marvell's enhanced DSA tag net/dsa/tag_brcm.c: Broadcom's 4 bytes tag +net/dsa/tag_qca.c: Qualcomm's 2 bytes tag The exact format of the tag protocol is vendor specific, but in general, they all contain something which: diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt index 399e4e866a9c..433b6724797a 100644 --- a/Documentation/networking/nf_conntrack-sysctl.txt +++ b/Documentation/networking/nf_conntrack-sysctl.txt @@ -62,10 +62,13 @@ nf_conntrack_generic_timeout - INTEGER (seconds) protocols. nf_conntrack_helper - BOOLEAN - 0 - disabled - not 0 - enabled (default) + 0 - disabled (default) + not 0 - enabled Enable automatic conntrack helper assignment. 
+ If disabled it is required to set up iptables rules to assign + helpers to connections. See the CT target description in the + iptables-extensions(8) man page for further information. nf_conntrack_icmp_timeout - INTEGER (seconds) default 30 diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 739db9ab16b2..6bbceb9a3a19 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios such as migration. +When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the +set of bits that KVM can return in struct kvm_clock_data's flag member. + +The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned +value is the exact kvmclock value seen by all VCPUs at the instant +when KVM_GET_CLOCK was called. If clear, the returned value is simply +CLOCK_MONOTONIC plus a constant offset; the offset can be modified +with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, +but the exact value read by each VCPU could differ, because the host +TSC is not stable. + struct kvm_clock_data { __u64 clock; /* kvmclock current value */ __u32 flags; diff --git a/MAINTAINERS b/MAINTAINERS index 30c873192458..f7fd00ef1323 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -77,6 +77,7 @@ Descriptions of section entries: Q: Patchwork web based patch tracking system site T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit + B: Bug tracking system location. S: Status, one of the following: Supported: Someone is actually paid to look after this. Maintained: Someone actually looks after it. @@ -281,6 +282,7 @@ L: linux-acpi@vger.kernel.org W: https://01.org/linux-acpi Q: https://patchwork.kernel.org/project/linux-acpi/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm +B: https://bugzilla.kernel.org S: Supported F: drivers/acpi/ F: drivers/pnp/pnpacpi/ @@ -304,6 +306,8 @@ W: https://acpica.org/ W: https://github.com/acpica/acpica/ Q: https://patchwork.kernel.org/project/linux-acpi/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm +B: https://bugzilla.kernel.org +B: https://bugs.acpica.org S: Supported F: drivers/acpi/acpica/ F: include/acpi/ @@ -313,6 +317,7 @@ ACPI FAN DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org W: https://01.org/linux-acpi +B: https://bugzilla.kernel.org S: Supported F: drivers/acpi/fan.c @@ -328,6 +333,7 @@ ACPI THERMAL DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org W: https://01.org/linux-acpi +B: https://bugzilla.kernel.org S: Supported F: drivers/acpi/*thermal* @@ -335,6 +341,7 @@ ACPI VIDEO DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org W: https://01.org/linux-acpi +B: https://bugzilla.kernel.org S: Supported F: drivers/acpi/acpi_video.c @@ -5701,6 +5708,7 @@ HIBERNATION (aka Software Suspend, aka swsusp) M: "Rafael J. 
Wysocki" M: Pavel Machek L: linux-pm@vger.kernel.org +B: https://bugzilla.kernel.org S: Supported F: arch/x86/power/ F: drivers/base/power/ @@ -7122,6 +7130,7 @@ F: drivers/scsi/53c700* LED SUBSYSTEM M: Richard Purdie M: Jacek Anaszewski +M: Pavel Machek L: linux-leds@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git S: Maintained @@ -8095,6 +8104,7 @@ F: drivers/infiniband/hw/mlx4/ F: include/linux/mlx4/ MELLANOX MLX5 core VPI driver +M: Saeed Mahameed M: Matan Barak M: Leon Romanovsky L: netdev@vger.kernel.org @@ -9285,11 +9295,12 @@ S: Maintained F: drivers/pci/host/*layerscape* PCI DRIVER FOR IMX6 -M: Richard Zhu +M: Richard Zhu M: Lucas Stach L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained +F: Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt F: drivers/pci/host/*imx6* PCI DRIVER FOR TI KEYSTONE @@ -9348,17 +9359,11 @@ F: drivers/pci/host/pci-exynos.c PCI DRIVER FOR SYNOPSIS DESIGNWARE M: Jingoo Han -M: Pratyush Anand -L: linux-pci@vger.kernel.org -S: Maintained -F: drivers/pci/host/*designware* - -PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE -M: Jose Abreu +M: Joao Pinto L: linux-pci@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/pci/designware-pcie.txt -F: drivers/pci/host/pcie-designware-plat.c +F: drivers/pci/host/*designware* PCI DRIVER FOR GENERIC OF HOSTS M: Will Deacon @@ -9660,6 +9665,7 @@ POWER MANAGEMENT CORE M: "Rafael J. Wysocki" L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm +B: https://bugzilla.kernel.org S: Supported F: drivers/base/power/ F: include/linux/pm.h @@ -11649,6 +11655,7 @@ M: "Rafael J. Wysocki" M: Len Brown M: Pavel Machek L: linux-pm@vger.kernel.org +B: https://bugzilla.kernel.org S: Supported F: Documentation/power/ F: arch/x86/kernel/acpi/ diff --git a/Makefile b/Makefile index 247430abfc73..369099dc0fae 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 9 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc8 NAME = Psychotic Stoned Sheep # *DOCUMENTATION* @@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ -fno-strict-aliasing -fno-common \ -Werror-implicit-function-declaration \ -Wno-format-security \ - -std=gnu89 + -std=gnu89 $(call cc-option,-fno-PIE) + KBUILD_AFLAGS_KERNEL := KBUILD_CFLAGS_KERNEL := -KBUILD_AFLAGS := -D__ASSEMBLY__ +KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) KBUILD_AFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds @@ -606,6 +607,13 @@ else include/config/auto.conf: ; endif # $(dot-config) +# For the kernel to actually contain only the needed exported symbols, +# we have to build modules as well to determine what those symbols are. +# (this can be evaluated only once include/config/auto.conf has been included) +ifdef CONFIG_TRIM_UNUSED_KSYMS + KBUILD_MODULES := 1 +endif + # The all: target is the default when no target is given on the # command line. 
# This allow a user to issue only 'make' to build a kernel including modules @@ -943,7 +951,7 @@ ifdef CONFIG_GDB_SCRIPTS endif ifdef CONFIG_TRIM_UNUSED_KSYMS $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \ - "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq" + "$(MAKE) -f $(srctree)/Makefile vmlinux" endif # standalone target for easier testing @@ -1018,8 +1026,6 @@ prepare2: prepare3 prepare-compiler-check outputmakefile asm-generic prepare1: prepare2 $(version_h) include/generated/utsrelease.h \ include/config/auto.conf $(cmd_crmodverdir) - $(Q)test -e include/generated/autoksyms.h || \ - touch include/generated/autoksyms.h archprepare: archheaders archscripts prepare1 scripts_basic diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h index 08e7e2a16ac1..a36e8601114d 100644 --- a/arch/arc/include/asm/delay.h +++ b/arch/arc/include/asm/delay.h @@ -22,10 +22,11 @@ static inline void __delay(unsigned long loops) { __asm__ __volatile__( - " lp 1f \n" - " nop \n" - "1: \n" - : "+l"(loops)); + " mov lp_count, %0 \n" + " lp 1f \n" + " nop \n" + "1: \n" + : : "r"(loops)); } extern void __bad_udelay(void); diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 89eeb3720051..e94ca72b974e 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep) #define pte_page(pte) pfn_to_page(pte_pfn(pte)) #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) -#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) +#define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 2b96cfc3be75..50d71695cd4e 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -23,7 +23,7 @@ static int l2_line_sz; static int ioc_exists; -int slc_enable = 1, ioc_enable = 1; +int slc_enable = 1, ioc_enable = 0; unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */ unsigned long perip_end = 0xFFFFFFFF; /* legacy value */ diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index befcd2619902..c558ba75cbcc 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -745,7 +745,6 @@ dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-pcduino2.dtb \ sun4i-a10-pov-protab2-ips9.dtb dtb-$(CONFIG_MACH_SUN5I) += \ - ntc-gr8-evb.dtb \ sun5i-a10s-auxtek-t003.dtb \ sun5i-a10s-auxtek-t004.dtb \ sun5i-a10s-mk802.dtb \ @@ -761,6 +760,7 @@ dtb-$(CONFIG_MACH_SUN5I) += \ sun5i-a13-olinuxino-micro.dtb \ sun5i-a13-q8-tablet.dtb \ sun5i-a13-utoo-p66.dtb \ + sun5i-gr8-evb.dtb \ sun5i-r8-chip.dtb dtb-$(CONFIG_MACH_SUN6I) += \ sun6i-a31-app4-evb1.dtb \ diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts index dec4b073ceb1..379939699164 100644 --- a/arch/arm/boot/dts/imx53-qsb.dts +++ b/arch/arm/boot/dts/imx53-qsb.dts @@ -64,8 +64,8 @@ }; ldo3_reg: ldo3 { - regulator-min-microvolt = <600000>; - regulator-max-microvolt = <1800000>; + regulator-min-microvolt = <1725000>; + regulator-max-microvolt = <3300000>; regulator-always-on; }; @@ -76,8 +76,8 @@ }; ldo5_reg: ldo5 { - regulator-min-microvolt = <1725000>; - regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3600000>; regulator-always-on; }; @@ -100,14 +100,14 @@ }; ldo9_reg: ldo9 { - 
regulator-min-microvolt = <1200000>; + regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3600000>; regulator-always-on; }; ldo10_reg: ldo10 { - regulator-min-microvolt = <1250000>; - regulator-max-microvolt = <3650000>; + regulator-min-microvolt = <1200000>; + regulator-max-microvolt = <3600000>; regulator-always-on; }; }; diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index 0ff1c2de95bf..26cce4d18405 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi @@ -13,6 +13,11 @@ }; }; + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0>; + }; + wl12xx_vmmc: wl12xx_vmmc { compatible = "regulator-fixed"; regulator-name = "vwl1271"; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 731ec37aed5b..8f9a69ca818c 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -13,9 +13,9 @@ }; }; - memory@0 { + memory@80000000 { device_type = "memory"; - reg = <0 0>; + reg = <0x80000000 0>; }; leds { diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 6365635fea5c..4caadb253249 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -124,6 +124,7 @@ compatible = "ti,abe-twl6040"; ti,model = "omap5-uevm"; + ti,jack-detection; ti,mclk-freq = <19200000>; ti,mcpdm = <&mcpdm>; @@ -415,7 +416,7 @@ ti,backup-battery-charge-high-current; }; - gpadc { + gpadc: gpadc { compatible = "ti,palmas-gpadc"; interrupts = <18 0 16 0 @@ -475,8 +476,8 @@ smps6_reg: smps6 { /* VDD_DDR3 - over VDD_SMPS6 */ regulator-name = "smps6"; - regulator-min-microvolt = <1200000>; - regulator-max-microvolt = <1200000>; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; regulator-always-on; regulator-boot-on; }; diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi index 91096a49efa9..8f79b4147bba 100644 --- a/arch/arm/boot/dts/stih407-family.dtsi +++ b/arch/arm/boot/dts/stih407-family.dtsi @@ -283,6 +283,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c0_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -296,6 +298,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c1_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -309,6 +313,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c2_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -322,6 +328,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c3_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -335,6 +343,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c4_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -348,6 +358,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c5_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -363,6 +375,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c10_default>; + #address-cells = <1>; + #size-cells = <0>; status = "disabled"; }; @@ -376,6 +390,8 @@ clock-frequency = <400000>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_i2c11_default>; + #address-cells 
= <1>; + #size-cells = <0>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts index ef2ff2f518f6..7fb507fcba7e 100644 --- a/arch/arm/boot/dts/stih410-b2260.dts +++ b/arch/arm/boot/dts/stih410-b2260.dts @@ -74,7 +74,7 @@ /* Low speed expansion connector */ spi0: spi@9844000 { label = "LS-SPI0"; - cs-gpio = <&pio30 3 0>; + cs-gpios = <&pio30 3 0>; status = "okay"; }; diff --git a/arch/arm/boot/dts/ntc-gr8-evb.dts b/arch/arm/boot/dts/sun5i-gr8-evb.dts similarity index 99% rename from arch/arm/boot/dts/ntc-gr8-evb.dts rename to arch/arm/boot/dts/sun5i-gr8-evb.dts index 4b622f3b5220..714381fd64d7 100644 --- a/arch/arm/boot/dts/ntc-gr8-evb.dts +++ b/arch/arm/boot/dts/sun5i-gr8-evb.dts @@ -44,7 +44,7 @@ */ /dts-v1/; -#include "ntc-gr8.dtsi" +#include "sun5i-gr8.dtsi" #include "sunxi-common-regulators.dtsi" #include diff --git a/arch/arm/boot/dts/ntc-gr8.dtsi b/arch/arm/boot/dts/sun5i-gr8.dtsi similarity index 100% rename from arch/arm/boot/dts/ntc-gr8.dtsi rename to arch/arm/boot/dts/sun5i-gr8.dtsi diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index 48fc24f36fcb..300a1bd5a6ec 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -282,11 +282,15 @@ uart1_pins_a: uart1@0 { allwinner,pins = "PG6", "PG7"; allwinner,function = "uart1"; + allwinner,drive = ; + allwinner,pull = ; }; uart1_pins_cts_rts_a: uart1-cts-rts@0 { allwinner,pins = "PG8", "PG9"; allwinner,function = "uart1"; + allwinner,drive = ; + allwinner,pull = ; }; mmc0_pins_a: mmc0@0 { diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 0745538b26d3..55e0e3ea9cb6 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -8,7 +8,6 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h -generic-y += export.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 68c2c097cffe..ad325a8c7e1e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -33,7 +33,7 @@ endif obj-$(CONFIG_CPU_IDLE) += cpuidle.o obj-$(CONFIG_ISA_DMA_API) += dma.o obj-$(CONFIG_FIQ) += fiq.o fiqasm.o -obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_MODULES) += armksyms.o module.o obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o obj-$(CONFIG_ISA_DMA) += dma-isa.o obj-$(CONFIG_PCI) += bios32.o isa.o diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c new file mode 100644 index 000000000000..7e45f69a0ddc --- /dev/null +++ b/arch/arm/kernel/armksyms.c @@ -0,0 +1,183 @@ +/* + * linux/arch/arm/kernel/armksyms.c + * + * Copyright (C) 2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/* + * libgcc functions - functions that are used internally by the + * compiler... (prototypes are not correct though, but that + * doesn't really matter since they're not versioned). 
+ */ +extern void __ashldi3(void); +extern void __ashrdi3(void); +extern void __divsi3(void); +extern void __lshrdi3(void); +extern void __modsi3(void); +extern void __muldi3(void); +extern void __ucmpdi2(void); +extern void __udivsi3(void); +extern void __umodsi3(void); +extern void __do_div64(void); +extern void __bswapsi2(void); +extern void __bswapdi2(void); + +extern void __aeabi_idiv(void); +extern void __aeabi_idivmod(void); +extern void __aeabi_lasr(void); +extern void __aeabi_llsl(void); +extern void __aeabi_llsr(void); +extern void __aeabi_lmul(void); +extern void __aeabi_uidiv(void); +extern void __aeabi_uidivmod(void); +extern void __aeabi_ulcmp(void); + +extern void fpundefinstr(void); + +void mmioset(void *, unsigned int, size_t); +void mmiocpy(void *, const void *, size_t); + + /* platform dependent support */ +EXPORT_SYMBOL(arm_delay_ops); + + /* networking */ +EXPORT_SYMBOL(csum_partial); +EXPORT_SYMBOL(csum_partial_copy_from_user); +EXPORT_SYMBOL(csum_partial_copy_nocheck); +EXPORT_SYMBOL(__csum_ipv6_magic); + + /* io */ +#ifndef __raw_readsb +EXPORT_SYMBOL(__raw_readsb); +#endif +#ifndef __raw_readsw +EXPORT_SYMBOL(__raw_readsw); +#endif +#ifndef __raw_readsl +EXPORT_SYMBOL(__raw_readsl); +#endif +#ifndef __raw_writesb +EXPORT_SYMBOL(__raw_writesb); +#endif +#ifndef __raw_writesw +EXPORT_SYMBOL(__raw_writesw); +#endif +#ifndef __raw_writesl +EXPORT_SYMBOL(__raw_writesl); +#endif + + /* string / mem functions */ +EXPORT_SYMBOL(strchr); +EXPORT_SYMBOL(strrchr); +EXPORT_SYMBOL(memset); +EXPORT_SYMBOL(memcpy); +EXPORT_SYMBOL(memmove); +EXPORT_SYMBOL(memchr); +EXPORT_SYMBOL(__memzero); + +EXPORT_SYMBOL(mmioset); +EXPORT_SYMBOL(mmiocpy); + +#ifdef CONFIG_MMU +EXPORT_SYMBOL(copy_page); + +EXPORT_SYMBOL(arm_copy_from_user); +EXPORT_SYMBOL(arm_copy_to_user); +EXPORT_SYMBOL(arm_clear_user); + +EXPORT_SYMBOL(__get_user_1); +EXPORT_SYMBOL(__get_user_2); +EXPORT_SYMBOL(__get_user_4); +EXPORT_SYMBOL(__get_user_8); + +#ifdef __ARMEB__ +EXPORT_SYMBOL(__get_user_64t_1); +EXPORT_SYMBOL(__get_user_64t_2); +EXPORT_SYMBOL(__get_user_64t_4); +EXPORT_SYMBOL(__get_user_32t_8); +#endif + +EXPORT_SYMBOL(__put_user_1); +EXPORT_SYMBOL(__put_user_2); +EXPORT_SYMBOL(__put_user_4); +EXPORT_SYMBOL(__put_user_8); +#endif + + /* gcc lib functions */ +EXPORT_SYMBOL(__ashldi3); +EXPORT_SYMBOL(__ashrdi3); +EXPORT_SYMBOL(__divsi3); +EXPORT_SYMBOL(__lshrdi3); +EXPORT_SYMBOL(__modsi3); +EXPORT_SYMBOL(__muldi3); +EXPORT_SYMBOL(__ucmpdi2); +EXPORT_SYMBOL(__udivsi3); +EXPORT_SYMBOL(__umodsi3); +EXPORT_SYMBOL(__do_div64); +EXPORT_SYMBOL(__bswapsi2); +EXPORT_SYMBOL(__bswapdi2); + +#ifdef CONFIG_AEABI +EXPORT_SYMBOL(__aeabi_idiv); +EXPORT_SYMBOL(__aeabi_idivmod); +EXPORT_SYMBOL(__aeabi_lasr); +EXPORT_SYMBOL(__aeabi_llsl); +EXPORT_SYMBOL(__aeabi_llsr); +EXPORT_SYMBOL(__aeabi_lmul); +EXPORT_SYMBOL(__aeabi_uidiv); +EXPORT_SYMBOL(__aeabi_uidivmod); +EXPORT_SYMBOL(__aeabi_ulcmp); +#endif + + /* bitops */ +EXPORT_SYMBOL(_set_bit); +EXPORT_SYMBOL(_test_and_set_bit); +EXPORT_SYMBOL(_clear_bit); +EXPORT_SYMBOL(_test_and_clear_bit); +EXPORT_SYMBOL(_change_bit); +EXPORT_SYMBOL(_test_and_change_bit); +EXPORT_SYMBOL(_find_first_zero_bit_le); +EXPORT_SYMBOL(_find_next_zero_bit_le); +EXPORT_SYMBOL(_find_first_bit_le); +EXPORT_SYMBOL(_find_next_bit_le); + +#ifdef __ARMEB__ +EXPORT_SYMBOL(_find_first_zero_bit_be); +EXPORT_SYMBOL(_find_next_zero_bit_be); +EXPORT_SYMBOL(_find_first_bit_be); +EXPORT_SYMBOL(_find_next_bit_be); +#endif + +#ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_OLD_MCOUNT +EXPORT_SYMBOL(mcount); +#endif 
+EXPORT_SYMBOL(__gnu_mcount_nc); +#endif + +#ifdef CONFIG_ARM_PATCH_PHYS_VIRT +EXPORT_SYMBOL(__pv_phys_pfn_offset); +EXPORT_SYMBOL(__pv_offset); +#endif + +#ifdef CONFIG_HAVE_ARM_SMCCC +EXPORT_SYMBOL(arm_smccc_smc); +EXPORT_SYMBOL(arm_smccc_hvc); +#endif diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index b629d3f11c3d..c73c4030ca5d 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S @@ -7,7 +7,6 @@ #include #include #include -#include #include "entry-header.S" @@ -154,7 +153,6 @@ ENTRY(mcount) __mcount _old #endif ENDPROC(mcount) -EXPORT_SYMBOL(mcount) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller_old) @@ -207,7 +205,6 @@ UNWIND(.fnstart) #endif UNWIND(.fnend) ENDPROC(__gnu_mcount_nc) -EXPORT_SYMBOL(__gnu_mcount_nc) #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(ftrace_caller) diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index f41cee4c5746..04286fd9e09c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -22,7 +22,6 @@ #include #include #include -#include #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE @@ -728,8 +727,6 @@ __pv_phys_pfn_offset: __pv_offset: .quad 0 .size __pv_offset, . -__pv_offset -EXPORT_SYMBOL(__pv_phys_pfn_offset) -EXPORT_SYMBOL(__pv_offset) #endif #include "head-common.S" diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 37669e7e13af..2e48b674aab1 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S @@ -16,7 +16,6 @@ #include #include #include -#include /* * Wrap c macros in asm macros to delay expansion until after the @@ -52,7 +51,6 @@ UNWIND( .fnend) ENTRY(arm_smccc_smc) SMCCC SMCCC_SMC ENDPROC(arm_smccc_smc) -EXPORT_SYMBOL(arm_smccc_smc) /* * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, @@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc) ENTRY(arm_smccc_hvc) SMCCC SMCCC_HVC ENDPROC(arm_smccc_hvc) -EXPORT_SYMBOL(arm_smccc_hvc) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index bc698383e822..9688ec0c6ef4 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); } +void dump_backtrace_stm(u32 *stack, u32 instruction) +{ + char str[80], *p; + unsigned int x; + int reg; + + for (reg = 10, x = 0, p = str; reg >= 0; reg--) { + if (instruction & BIT(reg)) { + p += sprintf(p, " r%d:%08x", reg, *stack--); + if (++x == 6) { + x = 0; + p = str; + printk("%s\n", str); + } + } + } + if (p != str) + printk("%s\n", str); +} + #ifndef CONFIG_ARM_UNWIND /* * Stack pointers should always be within the kernels view of diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 7fa487ef7e2f..37b2a11af345 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S @@ -3,6 +3,9 @@ * Written by Martin Mares */ +/* No __ro_after_init data in the .rodata section - which will always be ro */ +#define RO_AFTER_INIT_DATA + #include #include #include @@ -223,6 +226,8 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; + *(.data..ro_after_init) + NOSAVE_DATA CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) READ_MOSTLY_DATA(L1_CACHE_BYTES) diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S index a7e7de89bd75..b05e95840651 100644 --- a/arch/arm/lib/ashldi3.S +++ b/arch/arm/lib/ashldi3.S @@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. 
*/ #include #include -#include #ifdef __ARMEB__ #define al r1 @@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl) ENDPROC(__ashldi3) ENDPROC(__aeabi_llsl) -EXPORT_SYMBOL(__ashldi3) -EXPORT_SYMBOL(__aeabi_llsl) diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S index 490336e42518..275d7d2341a4 100644 --- a/arch/arm/lib/ashrdi3.S +++ b/arch/arm/lib/ashrdi3.S @@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */ #include #include -#include #ifdef __ARMEB__ #define al r1 @@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr) ENDPROC(__ashrdi3) ENDPROC(__aeabi_lasr) -EXPORT_SYMBOL(__ashrdi3) -EXPORT_SYMBOL(__aeabi_lasr) diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index fab5a50503ae..7d7952e5a3b1 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S @@ -10,6 +10,7 @@ * 27/03/03 Ian Molton Clean up CONFIG_CPU * */ +#include #include #include .text @@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions teq r3, r1, lsr #11 ldreq r0, [frame, #-8] @ get sp subeq r0, r0, #4 @ point at the last arg - bleq .Ldumpstm @ dump saved registers + bleq dump_backtrace_stm @ dump saved registers 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} ldr r3, .Ldsi @ instruction exists, teq r3, r1, lsr #11 subeq r0, frame, #16 - bleq .Ldumpstm @ dump saved registers + bleq dump_backtrace_stm @ dump saved registers teq sv_fp, #0 @ zero saved fp means beq no_frame @ no further frames @@ -112,38 +113,6 @@ ENDPROC(c_backtrace) .long 1004b, 1006b .popsection -#define instr r4 -#define reg r5 -#define stack r6 - -.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} - mov stack, r0 - mov instr, r1 - mov reg, #10 - mov r7, #0 -1: mov r3, #1 - ARM( tst instr, r3, lsl reg ) - THUMB( lsl r3, reg ) - THUMB( tst instr, r3 ) - beq 2f - add r7, r7, #1 - teq r7, #6 - moveq r7, #0 - adr r3, .Lcr - addne r3, r3, #1 @ skip newline - ldr r2, [stack], #-4 - mov r1, reg - adr r0, .Lfp - bl printk -2: subs reg, reg, #1 - bpl 1b - teq r7, #0 - adrne r0, .Lcr - blne printk - ldmfd sp!, {instr, reg, stack, r7, pc} - -.Lfp: .asciz " r%d:%08x%s" -.Lcr: .asciz "\n" .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" .align .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... 
fp, ip, lr, pc} diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index df06638b327c..7d807cfd8ef5 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h @@ -1,6 +1,5 @@ #include #include -#include #if __LINUX_ARM_ARCH__ >= 6 .macro bitop, name, instr @@ -26,7 +25,6 @@ UNWIND( .fnstart ) bx lr UNWIND( .fnend ) ENDPROC(\name ) -EXPORT_SYMBOL(\name ) .endm .macro testop, name, instr, store @@ -57,7 +55,6 @@ UNWIND( .fnstart ) 2: bx lr UNWIND( .fnend ) ENDPROC(\name ) -EXPORT_SYMBOL(\name ) .endm #else .macro bitop, name, instr @@ -77,7 +74,6 @@ UNWIND( .fnstart ) ret lr UNWIND( .fnend ) ENDPROC(\name ) -EXPORT_SYMBOL(\name ) .endm /** @@ -106,6 +102,5 @@ UNWIND( .fnstart ) ret lr UNWIND( .fnend ) ENDPROC(\name ) -EXPORT_SYMBOL(\name ) .endm #endif diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S index f05f78247304..07cda737bb11 100644 --- a/arch/arm/lib/bswapsdi2.S +++ b/arch/arm/lib/bswapsdi2.S @@ -1,6 +1,5 @@ #include #include -#include #if __LINUX_ARM_ARCH__ >= 6 ENTRY(__bswapsi2) @@ -36,5 +35,3 @@ ENTRY(__bswapdi2) ret lr ENDPROC(__bswapdi2) #endif -EXPORT_SYMBOL(__bswapsi2) -EXPORT_SYMBOL(__bswapdi2) diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index b566154f5cf4..e936352ccb00 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S @@ -10,7 +10,6 @@ #include #include #include -#include .text @@ -51,9 +50,6 @@ USER( strnebt r2, [r0]) UNWIND(.fnend) ENDPROC(arm_clear_user) ENDPROC(__clear_user_std) -#ifndef CONFIG_UACCESS_WITH_MEMCPY -EXPORT_SYMBOL(arm_clear_user) -#endif .pushsection .text.fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 63e4c1ed0225..7a4b06049001 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S @@ -13,7 +13,6 @@ #include #include #include -#include /* * Prototype: @@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user) #include "copy_template.S" ENDPROC(arm_copy_from_user) -EXPORT_SYMBOL(arm_copy_from_user) .pushsection .fixup,"ax" .align 0 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S index d97851d4af7a..6ee2f6706f86 100644 --- a/arch/arm/lib/copy_page.S +++ b/arch/arm/lib/copy_page.S @@ -13,7 +13,6 @@ #include #include #include -#include #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) @@ -46,4 +45,3 @@ ENTRY(copy_page) PLD( beq 2b ) ldmfd sp!, {r4, pc} @ 3 ENDPROC(copy_page) -EXPORT_SYMBOL(copy_page) diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 592c179112d1..caf5019d8161 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S @@ -13,7 +13,6 @@ #include #include #include -#include /* * Prototype: @@ -100,9 +99,6 @@ WEAK(arm_copy_to_user) ENDPROC(arm_copy_to_user) ENDPROC(__copy_to_user_std) -#ifndef CONFIG_UACCESS_WITH_MEMCPY -EXPORT_SYMBOL(arm_copy_to_user) -#endif .pushsection .text.fixup,"ax" .align 0 diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S index 68603b5ee537..3ac6ef01bc43 100644 --- a/arch/arm/lib/csumipv6.S +++ b/arch/arm/lib/csumipv6.S @@ -9,7 +9,6 @@ */ #include #include -#include .text @@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic) adcs r0, r0, #0 ldmfd sp!, {pc} ENDPROC(__csum_ipv6_magic) -EXPORT_SYMBOL(__csum_ipv6_magic) + diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S index 830b20e81c37..984e0f29d548 100644 --- a/arch/arm/lib/csumpartial.S +++ b/arch/arm/lib/csumpartial.S @@ -9,7 +9,6 @@ */ #include #include -#include .text @@ -141,4 +140,3 @@ ENTRY(csum_partial) bne 4b b .Lless4 
ENDPROC(csum_partial) -EXPORT_SYMBOL(csum_partial) diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S index 9c3383fed129..d03fc71fc88c 100644 --- a/arch/arm/lib/csumpartialcopy.S +++ b/arch/arm/lib/csumpartialcopy.S @@ -49,6 +49,5 @@ #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) -#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck) #include "csumpartialcopygeneric.S" diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S index 8b94d20e51d1..10b45909610c 100644 --- a/arch/arm/lib/csumpartialcopygeneric.S +++ b/arch/arm/lib/csumpartialcopygeneric.S @@ -8,7 +8,6 @@ * published by the Free Software Foundation. */ #include -#include /* * unsigned int @@ -332,4 +331,3 @@ FN_ENTRY mov r5, r4, get_byte_1 b .Lexit FN_EXIT -FN_EXPORT diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 5d495edf3d83..1712f132b80d 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S @@ -73,7 +73,6 @@ #define FN_ENTRY ENTRY(csum_partial_copy_from_user) #define FN_EXIT ENDPROC(csum_partial_copy_from_user) -#define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user) #include "csumpartialcopygeneric.S" diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 69aad80a3af4..2cef11884857 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c @@ -24,7 +24,6 @@ #include #include #include -#include #include /* @@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = { .const_udelay = __loop_const_udelay, .udelay = __loop_udelay, }; -EXPORT_SYMBOL(arm_delay_ops); static const struct delay_timer *delay_timer; static bool delay_calibrated; diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S index 0c9e1c18fc9e..a9eafe4981eb 100644 --- a/arch/arm/lib/div64.S +++ b/arch/arm/lib/div64.S @@ -15,7 +15,6 @@ #include #include #include -#include #ifdef __ARMEB__ #define xh r0 @@ -211,4 +210,3 @@ Ldiv0_64: UNWIND(.fnend) ENDPROC(__do_div64) -EXPORT_SYMBOL(__do_div64) diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index 26302b8cd38f..7848780e8834 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S @@ -15,7 +15,6 @@ */ #include #include -#include .text /* @@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_zero_bit_le) -EXPORT_SYMBOL(_find_first_zero_bit_le) /* * Purpose : Find next 'zero' bit @@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_zero_bit_le) -EXPORT_SYMBOL(_find_next_zero_bit_le) /* * Purpose : Find a 'one' bit @@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_bit_le) -EXPORT_SYMBOL(_find_first_bit_le) /* * Purpose : Find next 'one' bit @@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_bit_le) -EXPORT_SYMBOL(_find_next_bit_le) #ifdef __ARMEB__ @@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be) 3: mov r0, r1 @ no free bits ret lr ENDPROC(_find_first_zero_bit_be) -EXPORT_SYMBOL(_find_first_zero_bit_be) ENTRY(_find_next_zero_bit_be) teq r1, #0 @@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_zero_bit_be) -EXPORT_SYMBOL(_find_next_zero_bit_be) ENTRY(_find_first_bit_be) teq r1, #0 @@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be) 3: mov r0, r1 @ no free bits 
ret lr ENDPROC(_find_first_bit_be) -EXPORT_SYMBOL(_find_first_bit_be) ENTRY(_find_next_bit_be) teq r1, #0 @@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be) add r2, r2, #1 @ align bit pointer b 2b @ loop for next bit ENDPROC(_find_next_bit_be) -EXPORT_SYMBOL(_find_next_bit_be) #endif diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 9d09a38e73af..8ecfd15c3a02 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S @@ -31,7 +31,6 @@ #include #include #include -#include ENTRY(__get_user_1) check_uaccess r0, 1, r1, r2, __get_user_bad @@ -39,7 +38,6 @@ ENTRY(__get_user_1) mov r0, #0 ret lr ENDPROC(__get_user_1) -EXPORT_SYMBOL(__get_user_1) ENTRY(__get_user_2) check_uaccess r0, 2, r1, r2, __get_user_bad @@ -60,7 +58,6 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_2) -EXPORT_SYMBOL(__get_user_2) ENTRY(__get_user_4) check_uaccess r0, 4, r1, r2, __get_user_bad @@ -68,7 +65,6 @@ ENTRY(__get_user_4) mov r0, #0 ret lr ENDPROC(__get_user_4) -EXPORT_SYMBOL(__get_user_4) ENTRY(__get_user_8) check_uaccess r0, 8, r1, r2, __get_user_bad @@ -82,7 +78,6 @@ ENTRY(__get_user_8) mov r0, #0 ret lr ENDPROC(__get_user_8) -EXPORT_SYMBOL(__get_user_8) #ifdef __ARMEB__ ENTRY(__get_user_32t_8) @@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8) mov r0, #0 ret lr ENDPROC(__get_user_32t_8) -EXPORT_SYMBOL(__get_user_32t_8) ENTRY(__get_user_64t_1) check_uaccess r0, 1, r1, r2, __get_user_bad8 @@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1) mov r0, #0 ret lr ENDPROC(__get_user_64t_1) -EXPORT_SYMBOL(__get_user_64t_1) ENTRY(__get_user_64t_2) check_uaccess r0, 2, r1, r2, __get_user_bad8 @@ -121,7 +114,6 @@ rb .req r0 mov r0, #0 ret lr ENDPROC(__get_user_64t_2) -EXPORT_SYMBOL(__get_user_64t_2) ENTRY(__get_user_64t_4) check_uaccess r0, 4, r1, r2, __get_user_bad8 @@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4) mov r0, #0 ret lr ENDPROC(__get_user_64t_4) -EXPORT_SYMBOL(__get_user_64t_4) #endif __get_user_bad8: diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S index 3dff7a3a2aef..c31b2f3153f1 100644 --- a/arch/arm/lib/io-readsb.S +++ b/arch/arm/lib/io-readsb.S @@ -9,7 +9,6 @@ */ #include #include -#include .Linsb_align: rsb ip, ip, #4 cmp ip, r2 @@ -122,4 +121,3 @@ ENTRY(__raw_readsb) ldmfd sp!, {r4 - r6, pc} ENDPROC(__raw_readsb) -EXPORT_SYMBOL(__raw_readsb) diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S index bfd39682325b..2ed86fa5465f 100644 --- a/arch/arm/lib/io-readsl.S +++ b/arch/arm/lib/io-readsl.S @@ -9,7 +9,6 @@ */ #include #include -#include ENTRY(__raw_readsl) teq r2, #0 @ do we have to check for the zero len? 
@@ -78,4 +77,3 @@ ENTRY(__raw_readsl) strb r3, [r1, #0] ret lr ENDPROC(__raw_readsl) -EXPORT_SYMBOL(__raw_readsl) diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S index b3af3db6caac..413da9914529 100644 --- a/arch/arm/lib/io-readsw-armv3.S +++ b/arch/arm/lib/io-readsw-armv3.S @@ -9,7 +9,6 @@ */ #include #include -#include .Linsw_bad_alignment: adr r0, .Linsw_bad_align_msg @@ -104,4 +103,4 @@ ENTRY(__raw_readsw) ldmfd sp!, {r4, r5, r6, pc} -EXPORT_SYMBOL(__raw_readsw) + diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S index 3c7a7a40b33e..d9a45e9692ae 100644 --- a/arch/arm/lib/io-readsw-armv4.S +++ b/arch/arm/lib/io-readsw-armv4.S @@ -9,7 +9,6 @@ */ #include #include -#include .macro pack, rd, hw1, hw2 #ifndef __ARMEB__ @@ -130,4 +129,3 @@ ENTRY(__raw_readsw) strneb ip, [r1] ldmfd sp!, {r4, pc} ENDPROC(__raw_readsw) -EXPORT_SYMBOL(__raw_readsw) diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S index fa3633594415..a46bbc9b168b 100644 --- a/arch/arm/lib/io-writesb.S +++ b/arch/arm/lib/io-writesb.S @@ -9,7 +9,6 @@ */ #include #include -#include .macro outword, rd #ifndef __ARMEB__ @@ -93,4 +92,3 @@ ENTRY(__raw_writesb) ldmfd sp!, {r4, r5, pc} ENDPROC(__raw_writesb) -EXPORT_SYMBOL(__raw_writesb) diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S index 98ed6aec0b47..4ea2435988c1 100644 --- a/arch/arm/lib/io-writesl.S +++ b/arch/arm/lib/io-writesl.S @@ -9,7 +9,6 @@ */ #include #include -#include ENTRY(__raw_writesl) teq r2, #0 @ do we have to check for the zero len? @@ -66,4 +65,3 @@ ENTRY(__raw_writesl) bne 6b ret lr ENDPROC(__raw_writesl) -EXPORT_SYMBOL(__raw_writesl) diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S index 577184c082bb..121789eb6802 100644 --- a/arch/arm/lib/io-writesw-armv3.S +++ b/arch/arm/lib/io-writesw-armv3.S @@ -9,7 +9,6 @@ */ #include #include -#include .Loutsw_bad_alignment: adr r0, .Loutsw_bad_align_msg @@ -125,4 +124,3 @@ ENTRY(__raw_writesw) strne ip, [r0] ldmfd sp!, {r4, r5, r6, pc} -EXPORT_SYMBOL(__raw_writesw) diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S index e335f489d1fc..269f90c51ad2 100644 --- a/arch/arm/lib/io-writesw-armv4.S +++ b/arch/arm/lib/io-writesw-armv4.S @@ -9,7 +9,6 @@ */ #include #include -#include .macro outword, rd #ifndef __ARMEB__ @@ -99,4 +98,3 @@ ENTRY(__raw_writesw) strneh ip, [r0] ret lr ENDPROC(__raw_writesw) -EXPORT_SYMBOL(__raw_writesw) diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S index f541bc013bff..9397b2e532af 100644 --- a/arch/arm/lib/lib1funcs.S +++ b/arch/arm/lib/lib1funcs.S @@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. 
*/ #include #include #include -#include .macro ARM_DIV_BODY dividend, divisor, result, curbit @@ -239,8 +238,6 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__udivsi3) ENDPROC(__aeabi_uidiv) -EXPORT_SYMBOL(__udivsi3) -EXPORT_SYMBOL(__aeabi_uidiv) ENTRY(__umodsi3) UNWIND(.fnstart) @@ -259,7 +256,6 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__umodsi3) -EXPORT_SYMBOL(__umodsi3) #ifdef CONFIG_ARM_PATCH_IDIV .align 3 @@ -307,8 +303,6 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__divsi3) ENDPROC(__aeabi_idiv) -EXPORT_SYMBOL(__divsi3) -EXPORT_SYMBOL(__aeabi_idiv) ENTRY(__modsi3) UNWIND(.fnstart) @@ -333,7 +327,6 @@ UNWIND(.fnstart) UNWIND(.fnend) ENDPROC(__modsi3) -EXPORT_SYMBOL(__modsi3) #ifdef CONFIG_AEABI @@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} ) UNWIND(.fnend) ENDPROC(__aeabi_uidivmod) -EXPORT_SYMBOL(__aeabi_uidivmod) ENTRY(__aeabi_idivmod) UNWIND(.fnstart) @@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} ) UNWIND(.fnend) ENDPROC(__aeabi_idivmod) -EXPORT_SYMBOL(__aeabi_idivmod) #endif diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S index e40833981417..922dcd88b02b 100644 --- a/arch/arm/lib/lshrdi3.S +++ b/arch/arm/lib/lshrdi3.S @@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */ #include #include -#include #ifdef __ARMEB__ #define al r1 @@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr) ENDPROC(__lshrdi3) ENDPROC(__aeabi_llsr) -EXPORT_SYMBOL(__lshrdi3) -EXPORT_SYMBOL(__aeabi_llsr) diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S index 44182bf686a5..74a5bed6d999 100644 --- a/arch/arm/lib/memchr.S +++ b/arch/arm/lib/memchr.S @@ -11,7 +11,6 @@ */ #include #include -#include .text .align 5 @@ -25,4 +24,3 @@ ENTRY(memchr) 2: movne r0, #0 ret lr ENDPROC(memchr) -EXPORT_SYMBOL(memchr) diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 1be5b6ddf37c..64111bd4440b 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S @@ -13,7 +13,6 @@ #include #include #include -#include #define LDR1W_SHIFT 0 #define STR1W_SHIFT 0 @@ -69,5 +68,3 @@ ENTRY(memcpy) ENDPROC(memcpy) ENDPROC(mmiocpy) -EXPORT_SYMBOL(memcpy) -EXPORT_SYMBOL(mmiocpy) diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S index 71dcc5400d02..69a9d47fc5ab 100644 --- a/arch/arm/lib/memmove.S +++ b/arch/arm/lib/memmove.S @@ -13,7 +13,6 @@ #include #include #include -#include .text @@ -226,4 +225,3 @@ ENTRY(memmove) 18: backward_copy_shift push=24 pull=8 ENDPROC(memmove) -EXPORT_SYMBOL(memmove) diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 7b72044cba62..3c65e3bd790f 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S @@ -12,7 +12,6 @@ #include #include #include -#include .text .align 5 @@ -136,5 +135,3 @@ UNWIND( .fnstart ) UNWIND( .fnend ) ENDPROC(memset) ENDPROC(mmioset) -EXPORT_SYMBOL(memset) -EXPORT_SYMBOL(mmioset) diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S index 6dec26ed5bcc..0eded952e089 100644 --- a/arch/arm/lib/memzero.S +++ b/arch/arm/lib/memzero.S @@ -10,7 +10,6 @@ #include #include #include -#include .text .align 5 @@ -136,4 +135,3 @@ UNWIND( .fnstart ) ret lr @ 1 UNWIND( .fnend ) ENDPROC(__memzero) -EXPORT_SYMBOL(__memzero) diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S index b8f12388ccac..204305956925 100644 --- a/arch/arm/lib/muldi3.S +++ b/arch/arm/lib/muldi3.S @@ -12,7 +12,6 @@ #include #include -#include #ifdef __ARMEB__ #define xh r0 @@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul) ENDPROC(__muldi3) ENDPROC(__aeabi_lmul) -EXPORT_SYMBOL(__muldi3) -EXPORT_SYMBOL(__aeabi_lmul) diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 
11de126e2ed6..38d660d3705f 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S @@ -31,7 +31,6 @@ #include #include #include -#include ENTRY(__put_user_1) check_uaccess r0, 1, r1, ip, __put_user_bad @@ -39,7 +38,6 @@ ENTRY(__put_user_1) mov r0, #0 ret lr ENDPROC(__put_user_1) -EXPORT_SYMBOL(__put_user_1) ENTRY(__put_user_2) check_uaccess r0, 2, r1, ip, __put_user_bad @@ -64,7 +62,6 @@ ENTRY(__put_user_2) mov r0, #0 ret lr ENDPROC(__put_user_2) -EXPORT_SYMBOL(__put_user_2) ENTRY(__put_user_4) check_uaccess r0, 4, r1, ip, __put_user_bad @@ -72,7 +69,6 @@ ENTRY(__put_user_4) mov r0, #0 ret lr ENDPROC(__put_user_4) -EXPORT_SYMBOL(__put_user_4) ENTRY(__put_user_8) check_uaccess r0, 8, r1, ip, __put_user_bad @@ -86,7 +82,6 @@ ENTRY(__put_user_8) mov r0, #0 ret lr ENDPROC(__put_user_8) -EXPORT_SYMBOL(__put_user_8) __put_user_bad: mov r0, #-EFAULT diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S index 7301f6e6046c..013d64c71e8d 100644 --- a/arch/arm/lib/strchr.S +++ b/arch/arm/lib/strchr.S @@ -11,7 +11,6 @@ */ #include #include -#include .text .align 5 @@ -26,4 +25,3 @@ ENTRY(strchr) subeq r0, r0, #1 ret lr ENDPROC(strchr) -EXPORT_SYMBOL(strchr) diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S index aaf9fd98b754..3cec1c7482c4 100644 --- a/arch/arm/lib/strrchr.S +++ b/arch/arm/lib/strrchr.S @@ -11,7 +11,6 @@ */ #include #include -#include .text .align 5 @@ -25,4 +24,3 @@ ENTRY(strrchr) mov r0, r3 ret lr ENDPROC(strrchr) -EXPORT_SYMBOL(strrchr) diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 1626e3a551a1..6bd1089b07e0 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) } return n; } -EXPORT_SYMBOL(arm_copy_to_user); static unsigned long noinline __clear_user_memset(void __user *addr, unsigned long n) @@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n) } return n; } -EXPORT_SYMBOL(arm_clear_user); #if 0 diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S index 127a91af46f3..ad4a6309141a 100644 --- a/arch/arm/lib/ucmpdi2.S +++ b/arch/arm/lib/ucmpdi2.S @@ -12,7 +12,6 @@ #include #include -#include #ifdef __ARMEB__ #define xh r0 @@ -36,7 +35,6 @@ ENTRY(__ucmpdi2) ret lr ENDPROC(__ucmpdi2) -EXPORT_SYMBOL(__ucmpdi2) #ifdef CONFIG_AEABI @@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp) ret lr ENDPROC(__aeabi_ulcmp) -EXPORT_SYMBOL(__aeabi_ulcmp) #endif diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 737450fe790c..cab128913e72 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile @@ -32,6 +32,7 @@ endif ifdef CONFIG_SND_IMX_SOC obj-y += ssi-fiq.o +obj-y += ssi-fiq-ksym.o endif # i.MX21 based machines diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c new file mode 100644 index 000000000000..792090f9a032 --- /dev/null +++ b/arch/arm/mach-imx/ssi-fiq-ksym.c @@ -0,0 +1,20 @@ +/* + * Exported ksyms for the SSI FIQ handler + * + * Copyright (C) 2009, Sascha Hauer + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include + +#include + +EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); +EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); +EXPORT_SYMBOL(imx_ssi_fiq_start); +EXPORT_SYMBOL(imx_ssi_fiq_end); +EXPORT_SYMBOL(imx_ssi_fiq_base); + diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S index fd7917f1c204..a8b93c5f29b5 100644 --- a/arch/arm/mach-imx/ssi-fiq.S +++ b/arch/arm/mach-imx/ssi-fiq.S @@ -8,7 +8,6 @@ #include #include -#include /* * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size @@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer: .word 0x0 .L_imx_ssi_fiq_end: imx_ssi_fiq_end: -EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) -EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer) -EXPORT_SYMBOL(imx_ssi_fiq_start) -EXPORT_SYMBOL(imx_ssi_fiq_end) -EXPORT_SYMBOL(imx_ssi_fiq_base) + diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index a9afeebd59f2..0465338183c7 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig @@ -71,6 +71,7 @@ config SOC_AM43XX select HAVE_ARM_TWD select ARM_ERRATA_754322 select ARM_ERRATA_775420 + select OMAP_INTERCONNECT config SOC_DRA7XX bool "TI DRA7XX" diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2abd53ae3e7a..cc6d9fa60924 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void) #define OMAP3_SHOW_FEATURE(feat) \ if (omap3_has_ ##feat()) \ - printk(#feat" "); + n += scnprintf(buf + n, sizeof(buf) - n, #feat " "); static void __init omap3_cpuinfo(void) { const char *cpu_name; + char buf[64]; + int n = 0; + + memset(buf, 0, sizeof(buf)); /* * OMAP3430 and OMAP3530 are assumed to be same. @@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void) cpu_name = "OMAP3503"; } - sprintf(soc_name, "%s", cpu_name); + scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name); /* Print verbose information */ - pr_info("%s %s (", soc_name, soc_rev); + n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev); OMAP3_SHOW_FEATURE(l2cache); OMAP3_SHOW_FEATURE(iva); @@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void) OMAP3_SHOW_FEATURE(neon); OMAP3_SHOW_FEATURE(isp); OMAP3_SHOW_FEATURE(192mhz_clk); - - printk(")\n"); + if (*(buf + n - 1) == ' ') + n--; + n += scnprintf(buf + n, sizeof(buf) - n, ")\n"); + pr_info("%s", buf); } #define OMAP3_CHECK_FEATURE(status,feat) \ diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 62680aad2126..718981bb80cd 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) if (has_uart4) { en_uart4_mask = OMAP3630_EN_UART4_MASK; grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; + } else { + en_uart4_mask = 0; + grpsel_uart4_mask = 0; } /* Enable wakeups in PER */ diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index cba8cada8c81..cd15dbd62671 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c @@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm, return -ENODATA; } + if (!voltdm->volt_data) { + pr_err("%s: No voltage data defined for vdd_%s\n", + __func__, voltdm->name); + return -ENODATA; + } + /* Adjust voltage to the exact voltage from the OPP table */ for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { if (voltdm->volt_data[i].volt_nominal >= target_volt) { diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ab4f74536057..ab7710002ba6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c 
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void) dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); return 0; } -fs_initcall(dma_debug_do_init); +core_initcall(dma_debug_do_init); #ifdef CONFIG_ARM_DMA_USE_IOMMU diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index f6d333f09bfe..8dea61640cc1 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S @@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin) ret lr ENDPROC(cpu_cm7_proc_fin) - .section ".text.init", #alloc, #execinstr + .section ".init.text", #alloc, #execinstr __v7m_cm7_setup: mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index 334271a25f70..7d3a2acc6a55 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -393,7 +393,7 @@ #address-cells = <3>; #size-cells = <2>; dma-coherent; - ranges = <0x01000000 0x00 0x5f800000 0x00 0x5f800000 0x0 0x00800000>, + ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>, <0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>, <0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>; #interrupt-cells = <1>; diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts index 123a58b29cbd..f0b857d6d73c 100644 --- a/arch/arm64/boot/dts/arm/juno-r1.dts +++ b/arch/arm64/boot/dts/arm/juno-r1.dts @@ -76,7 +76,7 @@ compatible = "arm,idle-state"; arm,psci-suspend-param = <0x1010000>; local-timer-stop; - entry-latency-us = <300>; + entry-latency-us = <400>; exit-latency-us = <1200>; min-residency-us = <2500>; }; diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index 007be826efce..26aaa6a7670f 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -76,7 +76,7 @@ compatible = "arm,idle-state"; arm,psci-suspend-param = <0x1010000>; local-timer-stop; - entry-latency-us = <300>; + entry-latency-us = <400>; exit-latency-us = <1200>; min-residency-us = <2500>; }; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index a7270eff6939..6e154d948a80 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -76,7 +76,7 @@ compatible = "arm,idle-state"; arm,psci-suspend-param = <0x1010000>; local-timer-stop; - entry-latency-us = <300>; + entry-latency-us = <400>; exit-latency-us = <1200>; min-residency-us = <2500>; }; diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index c4762538ec01..e9bd58793464 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi @@ -105,7 +105,7 @@ status = "disabled"; }; - nb_perih_clk: nb-periph-clk@13000{ + nb_periph_clk: nb-periph-clk@13000 { compatible = "marvell,armada-3700-periph-clock-nb"; reg = <0x13000 0x100>; clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, @@ -113,7 +113,7 @@ #clock-cells = <1>; }; - sb_perih_clk: sb-periph-clk@18000{ + sb_periph_clk: sb-periph-clk@18000 { compatible = "marvell,armada-3700-periph-clock-sb"; reg = <0x18000 0x100>; clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 842fb333285c..6bf9e241179b 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi @@ -130,8 +130,8 @@ reg = <0x700600 0x50>; #address-cells = <0x1>; #size-cells = <0x0>; 
- cell-index = <1>; - clocks = <&cps_syscon0 0 3>; + cell-index = <3>; + clocks = <&cps_syscon0 1 21>; status = "disabled"; }; @@ -140,7 +140,7 @@ reg = <0x700680 0x50>; #address-cells = <1>; #size-cells = <0>; - cell-index = <2>; + cell-index = <4>; clocks = <&cps_syscon0 1 21>; status = "disabled"; }; diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 2065f46fa740..38b6a2b49d68 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h @@ -46,7 +46,15 @@ #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ -#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ +/* + * PMUv3 event types: required events + */ +#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 +#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 +#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 /* * Event filters for PMUv3 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index a9310a69fffd..57ae9d9ed9bb 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -31,17 +31,9 @@ /* * ARMv8 PMUv3 Performance Events handling code. - * Common event types. + * Common event types (some are defined in asm/perf_event.h). */ -/* Required events. */ -#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 -#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 -#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 -#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 -#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 - /* At least one of the following is required. */ #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index f302fdb3a030..87e7e6608cd8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, idx = ARMV8_PMU_CYCLE_IDX; } else { - BUG(); + return false; } + } else if (r->CRn == 0 && r->CRm == 9) { + /* PMCCNTR */ + if (pmu_access_event_counter_el0_disabled(vcpu)) + return false; + + idx = ARMV8_PMU_CYCLE_IDX; } else if (r->CRn == 14 && (r->CRm & 12) == 8) { /* PMEVCNTRn_EL0 */ if (pmu_access_event_counter_el0_disabled(vcpu)) @@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); } else { - BUG(); + return false; } if (!pmu_counter_idx_valid(vcpu, idx)) diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 7dd2dd47909a..df78b2ca70eb 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -214,6 +214,12 @@ #error Bad page size configuration for hugetlbfs! #endif +/* + * Wired register bits + */ +#define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16) +#define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0) + /* * Values used for computation of new tlb entries */ diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h index 4a2349302b55..dd179fd8acda 100644 --- a/arch/mips/include/asm/tlb.h +++ b/arch/mips/include/asm/tlb.h @@ -1,6 +1,9 @@ #ifndef __ASM_TLB_H #define __ASM_TLB_H +#include +#include + /* * MIPS doesn't need any special per-pte or per-vma handling, except * we need to flush cache for area to be unmapped. 
@@ -22,6 +25,16 @@ ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \ (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) +static inline unsigned int num_wired_entries(void) +{ + unsigned int wired = read_c0_wired(); + + if (cpu_has_mips_r6) + wired &= MIPSR6_WIRED_WIRED; + + return wired; +} + #include #endif /* __ASM_TLB_H */ diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index d56a855828c2..3bef306cdfdb 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -209,17 +209,18 @@ bad_area_nosemaphore: if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && __ratelimit(&ratelimit_state)) { - pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", + pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n", tsk->comm, write ? "write access to" : "read access from", field, address); pr_info("epc = %0*lx in", field, (unsigned long) regs->cp0_epc); - print_vma_addr(" ", regs->cp0_epc); + print_vma_addr(KERN_CONT " ", regs->cp0_epc); + pr_cont("\n"); pr_info("ra = %0*lx in", field, (unsigned long) regs->regs[31]); - print_vma_addr(" ", regs->regs[31]); - pr_info("\n"); + print_vma_addr(KERN_CONT " ", regs->regs[31]); + pr_cont("\n"); } current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; info.si_signo = SIGSEGV; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 3a6edecc3f38..e86ebcf5c071 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) writex_c0_entrylo1(entrylo); } #endif - tlbidx = read_c0_wired(); + tlbidx = num_wired_entries(); write_c0_wired(tlbidx + 1); write_c0_index(tlbidx); mtc0_tlbw_hazard(); @@ -147,7 +147,7 @@ void kunmap_coherent(void) local_irq_save(flags); old_ctx = read_c0_entryhi(); - wired = read_c0_wired() - 1; + wired = num_wired_entries() - 1; write_c0_wired(wired); write_c0_index(wired); write_c0_entryhi(UNIQUE_ENTRYHI(wired)); diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index bba9c1484b41..0596505770db 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -65,7 +65,7 @@ void local_flush_tlb_all(void) write_c0_entrylo0(0); write_c0_entrylo1(0); - entry = read_c0_wired(); + entry = num_wired_entries(); /* * Blast 'em all away. @@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, old_ctx = read_c0_entryhi(); htw_stop(); old_pagemask = read_c0_pagemask(); - wired = read_c0_wired(); + wired = num_wired_entries(); write_c0_wired(wired + 1); write_c0_index(wired); tlbw_use_hazard(); /* What is the hazard here? 
*/ @@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, htw_stop(); old_ctx = read_c0_entryhi(); old_pagemask = read_c0_pagemask(); - wired = read_c0_wired(); + wired = num_wired_entries(); if (--temp_tlb_entry < wired) { printk(KERN_WARNING "No TLB space left for add_temporary_entry\n"); diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 71c4a3aa3752..a14b86587013 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -34,7 +34,9 @@ config PARISC select HAVE_ARCH_HASH select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK - select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT) + select GENERIC_SCHED_CLOCK + select HAVE_UNSTABLE_SCHED_CLOCK if SMP + select GENERIC_CLOCKEVENTS select ARCH_NO_COHERENT_DMA_MMAP select CPU_NO_EFFICIENT_FFS diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 629eb464d5ba..c263301648f3 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void) { unsigned long rangetime, alltime; unsigned long size, start; + unsigned long threshold; alltime = mfctl(16); flush_data_cache(); @@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - /* Racy, but if we see an intermediate value, it's ok too... */ - parisc_cache_flush_threshold = size * alltime / rangetime; - - parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); - if (!parisc_cache_flush_threshold) - parisc_cache_flush_threshold = FLUSH_THRESHOLD; - - if (parisc_cache_flush_threshold > cache_info.dc_size) - parisc_cache_flush_threshold = cache_info.dc_size; - - printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", + threshold = L1_CACHE_ALIGN(size * alltime / rangetime); + if (threshold > cache_info.dc_size) + threshold = cache_info.dc_size; + if (threshold) + parisc_cache_flush_threshold = threshold; + printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", parisc_cache_flush_threshold/1024); /* calculate TLB flush threshold */ @@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void) flush_tlb_all(); alltime = mfctl(16) - alltime; - size = PAGE_SIZE; + size = 0; start = (unsigned long) _text; rangetime = mfctl(16); while (start < (unsigned long) _end) { @@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void) printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", alltime, size, rangetime); - parisc_tlb_flush_threshold = size * alltime / rangetime; - parisc_tlb_flush_threshold *= num_online_cpus(); - parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); - if (!parisc_tlb_flush_threshold) - parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; - - printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", + threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); + if (threshold) + parisc_tlb_flush_threshold = threshold; + printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", parisc_tlb_flush_threshold/1024); } diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 545f9d2fe711..c05d1876d27c 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -58,7 +58,7 @@ void __init setup_pdc(void) status = pdc_system_map_find_mods(&module_result, &module_path, 0); if (status == PDC_OK) { pdc_type = PDC_TYPE_SYSTEM_MAP; - printk("System Map.\n"); + pr_cont("System Map.\n"); return; } @@ 
-77,7 +77,7 @@ void __init setup_pdc(void) status = pdc_pat_cell_get_number(&cell_info); if (status == PDC_OK) { pdc_type = PDC_TYPE_PAT; - printk("64 bit PAT.\n"); + pr_cont("64 bit PAT.\n"); return; } #endif @@ -97,12 +97,12 @@ void __init setup_pdc(void) case 0xC: /* 715/64, at least */ pdc_type = PDC_TYPE_SNAKE; - printk("Snake.\n"); + pr_cont("Snake.\n"); return; default: /* Everything else */ - printk("Unsupported.\n"); + pr_cont("Unsupported.\n"); panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); } } diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 985e06da37f5..1b39a2acaadf 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */ fitmanymiddle: /* Loop if LOOP >= 2 */ addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ - pitlbe 0(%sr1, %r28) + pitlbe %r0(%sr1, %r28) pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ copy %arg3, %r31 /* Re-init inner loop count */ @@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */ fdtmanymiddle: /* Loop if LOOP >= 2 */ addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ - pdtlbe 0(%sr1, %r28) + pdtlbe %r0(%sr1, %r28) pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ copy %arg3, %r31 /* Re-init inner loop count */ @@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm) /* Purge any old translations */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) - pdtlb,l 0(%r29) + pdtlb,l %r0(%r28) + pdtlb,l %r0(%r29) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) - pdtlb 0(%r29) + pdtlb %r0(%r28) + pdtlb %r0(%r29) tlb_unlock %r20,%r21,%r22 #endif @@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm) /* Purge any old translation */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) + pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) + pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm) /* Purge any old translation */ #ifdef CONFIG_PA20 - pdtlb,l 0(%r28) + pdtlb,l %r0(%r28) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r28) + pdtlb %r0(%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -898,10 +898,10 @@ ENTRY_CFI(flush_dcache_page_asm) sync #ifdef CONFIG_PA20 - pdtlb,l 0(%r25) + pdtlb,l %r0(%r25) #else tlb_lock %r20,%r21,%r22 - pdtlb 0(%r25) + pdtlb %r0(%r25) tlb_unlock %r20,%r21,%r22 #endif @@ -931,13 +931,18 @@ ENTRY_CFI(flush_icache_page_asm) depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ #endif - /* Purge any old translation */ + /* Purge any old translation. Note that the FIC instruction + * may use either the instruction or data TLB. Given that we + * have a flat address space, it's not clear which TLB will be + * used. So, we purge both entries. 
*/ #ifdef CONFIG_PA20 + pdtlb,l %r0(%r28) pitlb,l %r0(%sr4,%r28) #else tlb_lock %r20,%r21,%r22 - pitlb (%sr4,%r28) + pdtlb %r0(%r28) + pitlb %r0(%sr4,%r28) tlb_unlock %r20,%r21,%r22 #endif @@ -976,10 +981,12 @@ ENTRY_CFI(flush_icache_page_asm) sync #ifdef CONFIG_PA20 + pdtlb,l %r0(%r28) pitlb,l %r0(%sr4,%r25) #else tlb_lock %r20,%r21,%r22 - pitlb (%sr4,%r25) + pdtlb %r0(%r28) + pitlb %r0(%sr4,%r25) tlb_unlock %r20,%r21,%r22 #endif diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 02d9ed0f3949..494ff6e8c88a 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c @@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte, if (!pte_none(*pte)) printk(KERN_ERR "map_pte_uncached: page already exists\n"); - set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); purge_tlb_start(flags); + set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); pdtlb_kernel(orig_vaddr); purge_tlb_end(flags); vaddr += PAGE_SIZE; diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 81d6f6391944..2e66a887788e 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -334,6 +334,10 @@ static int __init parisc_init(void) /* tell PDC we're Linux. Nevermind failure. */ pdc_stable_write(0x40, &osid, sizeof(osid)); + /* start with known state */ + flush_cache_all_local(); + flush_tlb_all_local(NULL); + processor_init(); #ifdef CONFIG_SMP pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9b63b876a13a..325f30d82b64 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -39,18 +40,6 @@ static unsigned long clocktick __read_mostly; /* timer cycles per tick */ -#ifndef CONFIG_64BIT -/* - * The processor-internal cycle counter (Control Register 16) is used as time - * source for the sched_clock() function. This register is 64bit wide on a - * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always - * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits - * with a per-cpu variable which we increase every time the counter - * wraps-around (which happens every ~4 secounds). - */ -static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits); -#endif - /* * We keep time on PA-RISC Linux by using the Interval Timer which is * a pair of registers; one is read-only and one is write-only; both @@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) */ mtctl(next_tick, 16); -#if !defined(CONFIG_64BIT) - /* check for overflow on a 32bit kernel (every ~4 seconds). */ - if (unlikely(next_tick < now)) - this_cpu_inc(cr16_high_32_bits); -#endif - /* Skip one clocktick on purpose if we missed next_tick. * The new CR16 must be "later" than current CR16 otherwise * itimer would not fire until CR16 wrapped - e.g 4 seconds @@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc); /* clock source code */ -static cycle_t read_cr16(struct clocksource *cs) +static cycle_t notrace read_cr16(struct clocksource *cs) { return get_cycles(); } @@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts) } -/* - * sched_clock() framework - */ - -static u32 cyc2ns_mul __read_mostly; -static u32 cyc2ns_shift __read_mostly; - -u64 sched_clock(void) +static u64 notrace read_cr16_sched_clock(void) { - u64 now; - - /* Get current cycle counter (Control Register 16). 
*/ -#ifdef CONFIG_64BIT - now = mfctl(16); -#else - now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32); -#endif - - /* return the value in ns (cycles_2_ns) */ - return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift); + return get_cycles(); } @@ -316,17 +282,16 @@ u64 sched_clock(void) void __init time_init(void) { - unsigned long current_cr16_khz; + unsigned long cr16_hz; - current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ clocktick = (100 * PAGE0->mem_10msec) / HZ; - - /* calculate mult/shift values for cr16 */ - clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, - NSEC_PER_MSEC, 0); - start_cpu_itimer(); /* get CPU 0 started */ + cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ + /* register at clocksource framework */ - clocksource_register_khz(&clocksource_cr16, current_cr16_khz); + clocksource_register_hz(&clocksource_cr16, cr16_hz); + + /* register as sched_clock source */ + sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); } diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index 57d42d129033..78aaf4ffd7ab 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c @@ -232,8 +232,12 @@ void start(void) console_ops.close(); kentry = (kernel_entry_t) vmlinux.addr; - if (ft_addr) - kentry(ft_addr, 0, NULL); + if (ft_addr) { + if(platform_ops.kentry) + platform_ops.kentry(ft_addr, vmlinux.addr); + else + kentry(ft_addr, 0, NULL); + } else kentry((unsigned long)initrd.addr, initrd.size, loader_info.promptr); diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S index ff2f1b97bc53..2a99fc9a3ccf 100644 --- a/arch/powerpc/boot/opal-calls.S +++ b/arch/powerpc/boot/opal-calls.S @@ -12,6 +12,19 @@ .text + .globl opal_kentry +opal_kentry: + /* r3 is the fdt ptr */ + mtctr r4 + li r4, 0 + li r5, 0 + li r6, 0 + li r7, 0 + ld r11,opal@got(r2) + ld r8,0(r11) + ld r9,8(r11) + bctr + #define OPAL_CALL(name, token) \ .globl name; \ name: \ diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c index 1f37e1c1d6d8..d7b4fd47eb44 100644 --- a/arch/powerpc/boot/opal.c +++ b/arch/powerpc/boot/opal.c @@ -23,14 +23,25 @@ struct opal { static u32 opal_con_id; +/* see opal-wrappers.S */ int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); int64_t opal_console_flush(uint64_t term_number); int64_t opal_poll_events(uint64_t *outstanding_event_mask); +void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr); + static int opal_con_open(void) { + /* + * When OPAL loads the boot kernel it stashes the OPAL base and entry + * address in r8 and r9 so the kernel can use the OPAL console + * before unflattening the devicetree. While executing the wrapper will + * probably trash r8 and r9 so this kentry hook restores them before + * entering the decompressed kernel. 
+ */ + platform_ops.kentry = opal_kentry; return 0; } diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 309d1b127e96..fad1862f4b2d 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h @@ -30,6 +30,7 @@ struct platform_ops { void * (*realloc)(void *ptr, unsigned long size); void (*exit)(void); void * (*vmlinux_alloc)(unsigned long size); + void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr); }; extern struct platform_ops platform_ops; diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index d1492736d852..e0baba1535e6 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h @@ -14,6 +14,10 @@ #include #include +#include +#include +#include +#include #include @@ -109,4 +113,12 @@ void early_setup_secondary(void); /* time */ void accumulate_stolen_time(void); +/* misc runtime */ +extern u64 __bswapdi2(u64); +extern s64 __lshrdi3(s64, int); +extern s64 __ashldi3(s64, int); +extern s64 __ashrdi3(s64, int); +extern int __cmpdi2(s64, s64); +extern int __ucmpdi2(u64, u64); + #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 84d49b197c32..9a3eee661297 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -91,7 +91,7 @@ */ #define LOAD_HANDLER(reg, label) \ ld reg,PACAKBASE(r13); /* get high part of &label */ \ - ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; + ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label); #define __LOAD_HANDLER(reg, label) \ ld reg,PACAKBASE(r13); \ @@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943) \ std ra,offset(r13); \ END_FTR_SECTION_NESTED(ftr,ftr,943) -#define EXCEPTION_PROLOG_0(area) \ - GET_PACA(r13); \ +#define EXCEPTION_PROLOG_0_PACA(area) \ std r9,area+EX_R9(r13); /* save r9 */ \ OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ HMT_MEDIUM; \ std r10,area+EX_R10(r13); /* save r10 - r12 */ \ OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) +#define EXCEPTION_PROLOG_0(area) \ + GET_PACA(r13); \ + EXCEPTION_PROLOG_0_PACA(area) + #define __EXCEPTION_PROLOG_1(area, extra, vec) \ OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ @@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) EXCEPTION_PROLOG_1(area, extra, vec); \ EXCEPTION_PROLOG_PSERIES_1(label, h); +/* Have the PACA in r13 already */ +#define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \ + EXCEPTION_PROLOG_0_PACA(area); \ + EXCEPTION_PROLOG_1(area, extra, vec); \ + EXCEPTION_PROLOG_PSERIES_1(label, h); + #define __KVMTEST(h, n) \ lbz r10,HSTATE_IN_GUEST(r13); \ cmpwi r10,0; \ diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index e88368354e49..e311c25751a4 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -28,6 +28,12 @@ * Individual features below. */ +/* + * Kernel read only support. + * We added the ppp value 0b110 in ISA 2.04. 
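+ *
+ * As a sketch of how this feature bit is consumed (mirroring the
+ * htab_convert_pte_flags() change later in this patch):
+ *
+ *	if (!(pteflags & _PAGE_WRITE))
+ *		rflags |= mmu_has_feature(MMU_FTR_KERNEL_RO) ?
+ *			  (HPTE_R_PP0 | 0x2) : 0x3;
+ *
+ * so CPUs without the 0b110 ppp encoding keep using the 0x3 PP value.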
+ */ +#define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000) + /* * We need to clear top 16bits of va (from the remaining 64 bits )in * tlbie* instructions @@ -103,10 +109,10 @@ #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE -#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE -#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE -#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE -#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE +#define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO +#define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO +#define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO +#define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ MMU_FTR_CI_LARGE_PAGE #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 0132831b3081..c56ea8c84abb 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -460,5 +460,6 @@ #define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ ((IH & 0x7) << 21)) +#define PPC_INVALIDATE_ERAT PPC_SLBIA(7) #endif /* _ASM_POWERPC_PPC_OPCODE_H */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 9cd4e8cbc78c..9e1499f98def 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -355,6 +355,7 @@ #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */ #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ +#define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */ #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ #define LPCR_MER_SH 11 #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */ diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 52ff3f025437..37c027ca83b2 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9) li r0,0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - ori r3, r3, LPCR_HVICE + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) + or r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 @@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9) li r0,0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR - ori r3, r3, LPCR_PECEDH - ori r3, r3, LPCR_HVICE + LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) + or r3, r3, r4 bl __init_LPCR bl __init_HFSCR bl __init_tlb_power9 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 08ba447a4b3d..1ba82ea90230 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100) EXC_REAL_BEGIN(system_reset, 0x100, 0x200) SET_SCRATCH0(r13) - EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, + GET_PACA(r13) + clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */ + EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, 
system_reset_common, EXC_STD, IDLETEST, 0x100) EXC_REAL_END(system_reset, 0x100, 0x200) @@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200) #ifdef CONFIG_PPC_P7_NAP EXC_COMMON_BEGIN(system_reset_idle_common) +BEGIN_FTR_SECTION + GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */ +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) bl pnv_restore_hyp_resource li r0,PNV_THREAD_RUNNING @@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300) SET_SCRATCH0(r13) /* save r13 */ /* * Running native on arch 2.06 or later, we may wakeup from winkle - * inside machine check. If yes, then last bit of HSPGR0 would be set + * inside machine check. If yes, then last bit of HSPRG0 would be set * to 1. Hence clear it unconditionally. */ GET_PACA(r13) @@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) /* * Go back to winkle. Please note that this thread was woken up in * machine check from winkle and have not restored the per-subcore - * state. Hence before going back to winkle, set last bit of HSPGR0 + * state. Hence before going back to winkle, set last bit of HSPRG0 * to 1. This will make sure that if this thread gets woken up * again at reset vector 0x100 then it will get chance to restore * the subcore state. diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ce6dc61b15b2..49a680d5ae37 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs) int instr; if (!(i % 8)) - printk("\n"); + pr_cont("\n"); #if !defined(CONFIG_BOOKE) /* If executing with the IMMU off, adjust pc rather @@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs) if (!__kernel_text_address(pc) || probe_kernel_address((unsigned int __user *)pc, instr)) { - printk(KERN_CONT "XXXXXXXX "); + pr_cont("XXXXXXXX "); } else { if (regs->nip == pc) - printk(KERN_CONT "<%08x> ", instr); + pr_cont("<%08x> ", instr); else - printk(KERN_CONT "%08x ", instr); + pr_cont("%08x ", instr); } pc += sizeof(int); } - printk("\n"); + pr_cont("\n"); } struct regbit { @@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep) for (; bits->bit; ++bits) if (val & bits->bit) { - printk("%s%s", s, bits->name); + pr_cont("%s%s", s, bits->name); s = sep; } } @@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val) * T: Transactional (bit 34) */ if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { - printk(",TM["); + pr_cont(",TM["); print_bits(val, msr_tm_bits, ""); - printk("]"); + pr_cont("]"); } } #else @@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {} static void print_msr_bits(unsigned long val) { - printk("<"); + pr_cont("<"); print_bits(val, msr_bits, ","); print_tm_bits(val); - printk(">"); + pr_cont(">"); } #ifdef CONFIG_PPC64 @@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs) printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); trap = TRAP(regs); if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) - printk("CFAR: "REG" ", regs->orig_gpr3); + pr_cont("CFAR: "REG" ", regs->orig_gpr3); if (trap == 0x200 || trap == 0x300 || trap == 0x600) #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) - printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); + pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); #else - printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); + pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); #endif #ifdef CONFIG_PPC64 - printk("SOFTE: %ld ", regs->softe); + 
pr_cont("SOFTE: %ld ", regs->softe); #endif #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (MSR_TM_ACTIVE(regs->msr)) - printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); + pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); #endif for (i = 0; i < 32; i++) { if ((i % REGS_PER_LINE) == 0) - printk("\nGPR%02d: ", i); - printk(REG " ", regs->gpr[i]); + pr_cont("\nGPR%02d: ", i); + pr_cont(REG " ", regs->gpr[i]); if (i == LAST_VOLATILE && !FULL_REGS(regs)) break; } - printk("\n"); + pr_cont("\n"); #ifdef CONFIG_KALLSYMS /* * Lookup NIP late so we have the best change of getting the @@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((ip == rth) && curr_frame >= 0) { - printk(" (%pS)", + pr_cont(" (%pS)", (void *)current->ret_stack[curr_frame].ret); curr_frame--; } #endif if (firstframe) - printk(" (unreliable)"); - printk("\n"); + pr_cont(" (unreliable)"); + pr_cont("\n"); } firstframe = 0; diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 7ac8e6eaab5b..8d586cff8a41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -226,17 +226,25 @@ static void __init configure_exceptions(void) if (firmware_has_feature(FW_FEATURE_OPAL)) opal_configure_cores(); - /* Enable AIL if supported, and we are in hypervisor mode */ - if (early_cpu_has_feature(CPU_FTR_HVMODE) && - early_cpu_has_feature(CPU_FTR_ARCH_207S)) { - unsigned long lpcr = mfspr(SPRN_LPCR); - mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); - } + /* AIL on native is done in cpu_ready_for_interrupts() */ } } static void cpu_ready_for_interrupts(void) { + /* + * Enable AIL if supported, and we are in hypervisor mode. This + * is called once for every processor. + * + * If we are not in hypervisor mode the job is done once for + * the whole partition in configure_exceptions(). 
+ */ + if (early_cpu_has_feature(CPU_FTR_HVMODE) && + early_cpu_has_feature(CPU_FTR_ARCH_207S)) { + unsigned long lpcr = mfspr(SPRN_LPCR); + mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); + } + /* Set IR and DR in PACA MSR */ get_paca()->kernel_msr = MSR_KERNEL; } diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 44d3c3a38e3e..78dabf065ba9 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -193,8 +193,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) /* * Kernel read only mapped with ppp bits 0b110 */ - if (!(pteflags & _PAGE_WRITE)) - rflags |= (HPTE_R_PP0 | 0x2); + if (!(pteflags & _PAGE_WRITE)) { + if (mmu_has_feature(MMU_FTR_KERNEL_RO)) + rflags |= (HPTE_R_PP0 | 0x2); + else + rflags |= 0x3; + } } else { if (pteflags & _PAGE_RWX) rflags |= 0x2; @@ -1029,6 +1033,10 @@ void hash__early_init_mmu_secondary(void) { /* Initialize hash table for that CPU */ if (!firmware_has_feature(FW_FEATURE_LPAR)) { + + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + update_hid_for_hash(); + if (!cpu_has_feature(CPU_FTR_ARCH_300)) mtspr(SPRN_SDR1, _SDR1); else diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index ed7bddc456b7..688b54517655 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void) * update partition table control register and UPRT */ if (!firmware_has_feature(FW_FEATURE_LPAR)) { + + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + update_hid_for_radix(); + lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index bda8c43be78a..3493cf4e0452 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c @@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { __tlbiel_pid(pid, set, ric); } + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); return; } @@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid, asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); asm volatile("ptesync": : :"memory"); + if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); } static inline void _tlbie_va(unsigned long va, unsigned long pid, diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index b23c76b42d6e..165ecdd24d22 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -43,6 +43,7 @@ config SPARC select ARCH_HAS_SG_CHAIN select CPU_NO_EFFICIENT_FFS select HAVE_ARCH_HARDENED_USERCOPY + select PROVE_LOCKING_SMALL if PROVE_LOCKING config SPARC32 def_bool !64BIT @@ -89,6 +90,14 @@ config ARCH_DEFCONFIG config ARCH_PROC_KCORE_TEXT def_bool y +config ARCH_ATU + bool + default y if SPARC64 + +config ARCH_DMA_ADDR_T_64BIT + bool + default y if ARCH_ATU + config IOMMU_HELPER bool default y if SPARC64 @@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y if SPARC64 +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "13" + help + The kernel memory allocator divides physically contiguous memory + blocks into "zones", where each zone is a power of two number of + pages. This option selects the largest power of two that the kernel + keeps in the memory allocator. 
If you need to allocate very large + blocks of physically contiguous memory, then you may need to + increase this value. + + This config option is actually maximum order plus one. For example, + a value of 13 means that the largest free memory block is 2^12 pages. + source "mm/Kconfig" if SPARC64 diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 666d5ba230d2..73cb8978df58 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h @@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle, */ #define HV_FAST_PCI_MSG_SETVALID 0xd3 +/* PCI IOMMU v2 definitions and services + * + * While the PCI IO definitions above is valid IOMMU v2 adds new PCI IO + * definitions and services. + * + * CTE Clump Table Entry. First level table entry in the ATU. + * + * pci_device_list + * A 32-bit aligned list of pci_devices. + * + * pci_device_listp + * real address of a pci_device_list. 32-bit aligned. + * + * iotte IOMMU translation table entry. + * + * iotte_attributes + * IO Attributes for IOMMU v2 mappings. In addition to + * read, write IOMMU v2 supports relax ordering + * + * io_page_list A 64-bit aligned list of real addresses. Each real + * address in an io_page_list must be properly aligned + * to the pagesize of the given IOTSB. + * + * io_page_list_p Real address of an io_page_list, 64-bit aligned. + * + * IOTSB IO Translation Storage Buffer. An aligned table of + * IOTTEs. Each IOTSB has a pagesize, table size, and + * virtual address associated with it that must match + * a pagesize and table size supported by the un-derlying + * hardware implementation. The alignment requirements + * for an IOTSB depend on the pagesize used for that IOTSB. + * Each IOTTE in an IOTSB maps one pagesize-sized page. + * The size of the IOTSB dictates how large of a virtual + * address space the IOTSB is capable of mapping. + * + * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus + * iotsb_handle represents a binding of an IOTSB to a + * PCI root complex. + * + * iotsb_index Zero-based IOTTE number within an IOTSB. + */ + +/* The index_count argument consists of two fields: + * bits 63:48 #iottes and bits 47:0 iotsb_index + */ +#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \ + (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index))) + +/* pci_iotsb_conf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_CONF + * ARG0: devhandle + * ARG1: r_addr + * ARG2: size + * ARG3: pagesize + * ARG4: iova + * RET0: status + * RET1: iotsb_handle + * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize + * EBADALIGN r_addr is not properly aligned + * ENORADDR r_addr is not a valid real address + * ETOOMANY No further IOTSBs may be configured + * EBUSY Duplicate devhandle, raddir, iova combination + * + * Create an IOTSB suitable for the PCI root complex identified by devhandle, + * for the DMA virtual address defined by the argument iova. + * + * r_addr is the properly aligned base address of the IOTSB and size is the + * IOTSB (table) size in bytes.The IOTSB is required to be zeroed prior to + * being configured. If it contains any values other than zeros then the + * behavior is undefined. + * + * pagesize is the size of each page in the IOTSB. Note that the combination of + * size (table size) and pagesize must be valid. + * + * virt is the DMA virtual address this IOTSB will map. + * + * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1. 
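+ *
+ * Sizing example (assuming 8K I/O pages): mapping a 32G DMA virtual
+ * address range takes 32G / 8K = 4M IOTTEs, so size must be
+ * 4M * 8 bytes = 32M. This matches the (atu->size / IO_PAGE_SIZE) * 8
+ * calculation done by pci_sun4v_atu_alloc_iotsb() later in this patch.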
+ * Once configured, privileged access to the IOTSB memory is prohibited and + * creates undefined behavior. The only permitted access is indirect via these + * services. + */ +#define HV_FAST_PCI_IOTSB_CONF 0x190 + +/* pci_iotsb_info() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_INFO + * ARG0: devhandle + * ARG1: iotsb_handle + * RET0: status + * RET1: r_addr + * RET2: size + * RET3: pagesize + * RET4: iova + * RET5: #bound + * ERRORS: EINVAL Invalid devhandle or iotsb_handle + * + * This service returns configuration information about an IOTSB previously + * created with pci_iotsb_conf. + * + * iotsb_handle value 0 may be used with this service to inquire about the + * legacy IOTSB that may or may not exist. If the service succeeds, the return + * values describe the legacy IOTSB and I/O virtual addresses mapped by that + * table. However, the table base address r_addr may contain the value -1 which + * indicates a memory range that cannot be accessed or be reclaimed. + * + * The return value #bound contains the number of PCI devices that iotsb_handle + * is currently bound to. + */ +#define HV_FAST_PCI_IOTSB_INFO 0x191 + +/* pci_iotsb_unconf() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF + * ARG0: devhandle + * ARG1: iotsb_handle + * RET0: status + * ERRORS: EINVAL Invalid devhandle or iotsb_handle + * EBUSY The IOTSB is bound and may not be unconfigured + * + * This service unconfigures the IOTSB identified by the devhandle and + * iotsb_handle arguments, previously created with pci_iotsb_conf. + * The IOTSB must not be currently bound to any device or the service will fail + * + * If the call succeeds, iotsb_handle is no longer valid. + */ +#define HV_FAST_PCI_IOTSB_UNCONF 0x192 + +/* pci_iotsb_bind() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_BIND + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: pci_device + * RET0: status + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device + * EBUSY A PCI function is already bound to an IOTSB at the same + * address range as specified by devhandle, iotsb_handle. + * + * This service binds the PCI function specified by the argument pci_device to + * the IOTSB specified by the arguments devhandle and iotsb_handle. + * + * The PCI device function is bound to the specified IOTSB with the IOVA range + * specified when the IOTSB was configured via pci_iotsb_conf. If the function + * is already bound then it is unbound first. + */ +#define HV_FAST_PCI_IOTSB_BIND 0x193 + +/* pci_iotsb_unbind() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: pci_device + * RET0: status + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device + * ENOMAP The PCI function was not bound to the specified IOTSB + * + * This service unbinds the PCI device specified by the argument pci_device + * from the IOTSB identified * by the arguments devhandle and iotsb_handle. + * + * If the PCI device is not bound to the specified IOTSB then this service will + * fail with status ENOMAP + */ +#define HV_FAST_PCI_IOTSB_UNBIND 0x194 + +/* pci_iotsb_get_binding() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iova + * RET0: status + * RET1: iotsb_handle + * ERRORS: EINVAL Invalid devhandle, pci_device, or iova + * ENOMAP The PCI function is not bound to an IOTSB at iova + * + * This service returns the IOTSB binding, iotsb_handle, for a given pci_device + * and DMA virtual address, iova. 
+ * + * iova must be the base address of a DMA virtual address range as defined by + * the iommu-address-ranges property in the root complex device node defined + * by the argument devhandle. + */ +#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195 + +/* pci_iotsb_map() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_MAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: index_count + * ARG3: iotte_attributes + * ARG4: io_page_list_p + * RET0: status + * RET1: #mapped + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes, + * iotsb_index or iotte_attributes + * EBADALIGN Improperly aligned io_page_list_p or I/O page + * address in the I/O page list. + * ENORADDR Invalid io_page_list_p or I/O page address in + * the I/O page list. + * + * This service creates and flushes mappings in the IOTSB defined by the + * arguments devhandle, iotsb. + * + * The index_count argument consists of two fields. Bits 63:48 contain #iotte + * and bits 47:0 contain iotsb_index + * + * The first mapping is created in the IOTSB index specified by iotsb_index. + * Subsequent mappings are created at iotsb_index+1 and so on. + * + * The attributes of each mapping are defined by the argument iotte_attributes. + * + * The io_page_list_p specifies the real address of the 64-bit-aligned list of + * #iottes I/O page addresses. Each page address must be a properly aligned + * real address of a page to be mapped in the IOTSB. The first entry in the I/O + * page list contains the real address of the first page, the 2nd entry for the + * 2nd page, and so on. + * + * #iottes must be greater than zero. + * + * The return value #mapped is the actual number of mappings created, which may + * be less than or equal to the argument #iottes. If the function returns + * successfully with a #mapped value less than the requested #iottes then the + * caller should continue to invoke the service with updated iotsb_index, + * #iottes, and io_page_list_p arguments until all pages are mapped. + * + * This service must not be used to demap a mapping. In other words, all + * mappings must be valid and have one or both of the RW attribute bits set. + * + * Note: + * It is implementation-defined whether I/O page real address validity checking + * is done at time mappings are established or deferred until they are + * accessed. + */ +#define HV_FAST_PCI_IOTSB_MAP 0x196 + +/* pci_iotsb_map_one() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: iotte_attributes + * ARG4: r_addr + * RET0: status + * ERRORS: EINVAL Invalid devhandle,iotsb_handle, iotsb_index + * or iotte_attributes + * EBADALIGN Improperly aligned r_addr + * ENORADDR Invalid r_addr + * + * This service creates and flushes a single mapping in the IOTSB defined by the + * arguments devhandle, iotsb. + * + * The mapping for the page at r_addr is created at the IOTSB index specified by + * iotsb_index with the attributes iotte_attributes. + * + * This service must not be used to demap a mapping. In other words, the mapping + * must be valid and have one or both of the RW attribute bits set. + * + * Note: + * It is implementation-defined whether I/O page real address validity checking + * is done at time mappings are established or deferred until they are + * accessed. 
+ */ +#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197 + +/* pci_iotsb_demap() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: #iottes + * RET0: status + * RET1: #unmapped + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes + * + * This service unmaps and flushes up to #iottes mappings starting at index + * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb. + * + * #iottes must be greater than zero. + * + * The actual number of IOTTEs unmapped is returned in #unmapped and may be less + * than or equal to the requested number of IOTTEs, #iottes. + * + * If #unmapped is less than #iottes, the caller should continue to invoke this + * service with updated iotsb_index and #iottes arguments until all pages are + * demapped. + */ +#define HV_FAST_PCI_IOTSB_DEMAP 0x198 + +/* pci_iotsb_getmap() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * RET0: status + * RET1: r_addr + * RET2: iotte_attributes + * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index + * ENOMAP No mapping was found + * + * This service returns the mapping specified by index iotsb_index from the + * IOTSB defined by the arguments devhandle, iotsb. + * + * Upon success, the real address of the mapping shall be returned in + * r_addr and thethe IOTTE mapping attributes shall be returned in + * iotte_attributes. + * + * The return value iotte_attributes may not include optional features used in + * the call to create the mapping. + */ +#define HV_FAST_PCI_IOTSB_GETMAP 0x199 + +/* pci_iotsb_sync_mappings() + * TRAP: HV_FAST_TRAP + * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS + * ARG0: devhandle + * ARG1: iotsb_handle + * ARG2: iotsb_index + * ARG3: #iottes + * RET0: status + * RET1: #synced + * ERROS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes + * + * This service synchronizes #iottes mappings starting at index iotsb_index in + * the IOTSB defined by the arguments devhandle, iotsb. + * + * #iottes must be greater than zero. + * + * The actual number of IOTTEs synchronized is returned in #synced, which may + * be less than or equal to the requested number, #iottes. + * + * Upon a successful return, #synced is less than #iottes, the caller should + * continue to invoke this service with updated iotsb_index and #iottes + * arguments until all pages are synchronized. + */ +#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a + /* Logical Domain Channel services. 
*/ #define LDC_CHANNEL_DOWN 0 @@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, #define HV_GRP_SDIO 0x0108 #define HV_GRP_SDIO_ERR 0x0109 #define HV_GRP_REBOOT_DATA 0x0110 +#define HV_GRP_ATU 0x0111 #define HV_GRP_M7_PERF 0x0114 #define HV_GRP_NIAG_PERF 0x0200 #define HV_GRP_FIRE_PERF 0x0201 diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h index cd0d69fa7592..f24f356f2503 100644 --- a/arch/sparc/include/asm/iommu_64.h +++ b/arch/sparc/include/asm/iommu_64.h @@ -24,8 +24,36 @@ struct iommu_arena { unsigned int limit; }; +#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */ + +/* Data structures for SPARC ATU architecture */ +struct atu_iotsb { + void *table; /* IOTSB table base virtual addr*/ + u64 ra; /* IOTSB table real addr */ + u64 dvma_size; /* ranges[3].size or OS slected 32G size */ + u64 dvma_base; /* ranges[3].base */ + u64 table_size; /* IOTSB table size */ + u64 page_size; /* IO PAGE size for IOTSB */ + u32 iotsb_num; /* tsbnum is same as iotsb_handle */ +}; + +struct atu_ranges { + u64 base; + u64 size; +}; + +struct atu { + struct atu_ranges *ranges; + struct atu_iotsb *iotsb; + struct iommu_map_table tbl; + u64 base; + u64 size; + u64 dma_addr_mask; +}; + struct iommu { struct iommu_map_table tbl; + struct atu *atu; spinlock_t lock; u32 dma_addr_mask; iopte_t *page_table; diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 662500fa555f..267731234ce8 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -39,6 +39,7 @@ static struct api_info api_table[] = { { .group = HV_GRP_SDIO, }, { .group = HV_GRP_SDIO_ERR, }, { .group = HV_GRP_REBOOT_DATA, }, + { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, { .group = HV_GRP_FIRE_PERF, }, { .group = HV_GRP_N2_CPU, }, diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 5c615abff030..852a3291db96 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask) struct iommu *iommu = dev->archdata.iommu; u64 dma_addr_mask = iommu->dma_addr_mask; - if (device_mask >= (1UL << 32UL)) - return 0; + if (device_mask > DMA_BIT_MASK(32)) { + if (iommu->atu) + dma_addr_mask = iommu->atu->dma_addr_mask; + else + return 0; + } if ((device_mask & dma_addr_mask) == dma_addr_mask) return 1; diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index b40cec252905..828493329f68 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -13,7 +13,6 @@ #include #include #include -#include #include diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index db57d8acdc01..06981cc716b6 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = { { .major = 1, .minor = 1 }, }; +static unsigned long vatu_major = 1; +static unsigned long vatu_minor = 1; + #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) struct iommu_batch { @@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns } /* Interrupts must be disabled. 
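 * (The batch state lives in the per-CPU iommu_batch structure, so the
 * caller must keep interrupts off while a batch is built and flushed.)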
*/ -static long iommu_batch_flush(struct iommu_batch *p) +static long iommu_batch_flush(struct iommu_batch *p, u64 mask) { struct pci_pbm_info *pbm = p->dev->archdata.host_controller; + u64 *pglist = p->pglist; + u64 index_count; unsigned long devhandle = pbm->devhandle; unsigned long prot = p->prot; unsigned long entry = p->entry; - u64 *pglist = p->pglist; unsigned long npages = p->npages; + unsigned long iotsb_num; + unsigned long ret; + long num; /* VPCI maj=1, min=[0,1] only supports read and write */ if (vpci_major < 2) prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); while (npages != 0) { - long num; - - num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist)); - if (unlikely(num < 0)) { - if (printk_ratelimit()) - printk("iommu_batch_flush: IOMMU map of " - "[%08lx:%08llx:%lx:%lx:%lx] failed with " - "status %ld\n", - devhandle, HV_PCI_TSBID(0, entry), - npages, prot, __pa(pglist), num); - return -1; + if (mask <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_map(devhandle, + HV_PCI_TSBID(0, entry), + npages, + prot, + __pa(pglist)); + if (unlikely(num < 0)) { + pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, + HV_PCI_TSBID(0, entry), + npages, prot, __pa(pglist), + num); + return -1; + } + } else { + index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), + iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; + ret = pci_sun4v_iotsb_map(devhandle, + iotsb_num, + index_count, + prot, + __pa(pglist), + &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", + __func__, + devhandle, iotsb_num, + index_count, prot, + __pa(pglist), ret); + return -1; + } } - entry += num; npages -= num; pglist += num; @@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p) return 0; } -static inline void iommu_batch_new_entry(unsigned long entry) +static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); if (p->entry + p->npages == entry) return; if (p->entry != ~0UL) - iommu_batch_flush(p); + iommu_batch_flush(p, mask); p->entry = entry; } /* Interrupts must be disabled. */ -static inline long iommu_batch_add(u64 phys_page) +static inline long iommu_batch_add(u64 phys_page, u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); @@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page) p->pglist[p->npages++] = phys_page; if (p->npages == PGLIST_NENTS) - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); return 0; } /* Interrupts must be disabled. 
*/ -static inline long iommu_batch_end(void) +static inline long iommu_batch_end(u64 mask) { struct iommu_batch *p = this_cpu_ptr(&iommu_batch); BUG_ON(p->npages >= PGLIST_NENTS); - return iommu_batch_flush(p); + return iommu_batch_flush(p, mask); } static void *dma_4v_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addrp, gfp_t gfp, unsigned long attrs) { + u64 mask; unsigned long flags, order, first_page, npages, n; unsigned long prot = 0; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; struct page *page; void *ret; long entry; @@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, memset((char *)first_page, 0, PAGE_SIZE << order); iommu = dev->archdata.iommu; + atu = iommu->atu; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + mask = dev->coherent_dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto range_alloc_fail; - *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = (void *) first_page; first_page = __pa(first_page); @@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, entry); for (n = 0; n < npages; n++) { - long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); + long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); if (unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, return ret; iommu_map_fail: - iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); range_alloc_fail: free_pages(first_page, order); return NULL; } -static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, - unsigned long npages) +unsigned long dma_4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + struct pci_bus *bus_dev) +{ + struct pci_dev *pdev; + unsigned long err; + unsigned int bus; + unsigned int device; + unsigned int fun; + + list_for_each_entry(pdev, &bus_dev->devices, bus_list) { + if (pdev->subordinate) { + /* No need to bind pci bridge */ + dma_4v_iotsb_bind(devhandle, iotsb_num, + pdev->subordinate); + } else { + bus = bus_dev->number; + device = PCI_SLOT(pdev->devfn); + fun = PCI_FUNC(pdev->devfn); + err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, + HV_PCI_DEVICE_BUILD(bus, + device, + fun)); + + /* If bind fails for one device it is going to fail + * for rest of the devices because we are sharing + * IOTSB. So in case of failure simply return with + * error. 
+ */ + if (err) + return err; + } + } + + return 0; +} + +static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, + dma_addr_t dvma, unsigned long iotsb_num, + unsigned long entry, unsigned long npages) { - u32 devhandle = *(u32 *)demap_arg; unsigned long num, flags; + unsigned long ret; local_irq_save(flags); do { - num = pci_sun4v_iommu_demap(devhandle, - HV_PCI_TSBID(0, entry), - npages); - + if (dvma <= DMA_BIT_MASK(32)) { + num = pci_sun4v_iommu_demap(devhandle, + HV_PCI_TSBID(0, entry), + npages); + } else { + ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, + entry, npages, &num); + if (unlikely(ret != HV_EOK)) { + pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", + ret); + } + } entry += num; npages -= num; } while (npages != 0); @@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long order, npages, entry; + unsigned long iotsb_num; u32 devhandle; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; - entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); + + if (dvma <= DMA_BIT_MASK(32)) { + tbl = &iommu->tbl; + iotsb_num = 0; /* we don't care for legacy iommu */ + } else { + tbl = &atu->tbl; + iotsb_num = atu->iotsb->iotsb_num; + } + entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); + dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); order = get_order(size); if (order < 10) free_pages((unsigned long)cpu, order); @@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long flags, npages, oaddr; unsigned long i, base_paddr; - u32 bus_addr, ret; unsigned long prot; + dma_addr_t bus_addr, ret; long entry; iommu = dev->archdata.iommu; + atu = iommu->atu; if (unlikely(direction == DMA_NONE)) goto bad; @@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, (unsigned long)(-1), 0); if (unlikely(entry == IOMMU_ERROR_CODE)) goto bad; - bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); + bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); ret = bus_addr | (oaddr & ~IO_PAGE_MASK); base_paddr = __pa(oaddr & IO_PAGE_MASK); prot = HV_PCI_MAP_ATTR_READ; @@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, iommu_batch_start(dev, prot, entry); for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { - long err = iommu_batch_add(base_paddr); + long err = iommu_batch_add(base_paddr, mask); if (unlikely(err < 0L)) goto iommu_map_fail; } - if (unlikely(iommu_batch_end() < 0L)) + if (unlikely(iommu_batch_end(mask) < 0L)) goto iommu_map_fail; local_irq_restore(flags); @@ -310,7 +414,7 @@ bad: return DMA_ERROR_CODE; iommu_map_fail: - 
iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); return DMA_ERROR_CODE; } @@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, { struct pci_pbm_info *pbm; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; unsigned long npages; + unsigned long iotsb_num; long entry; u32 devhandle; @@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); npages >>= IO_PAGE_SHIFT; bus_addr &= IO_PAGE_MASK; - entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); + + if (bus_addr <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } + entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; + dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); + iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); } static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, @@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, unsigned long seg_boundary_size; int outcount, incount, i; struct iommu *iommu; + struct atu *atu; + struct iommu_map_table *tbl; + u64 mask; unsigned long base_shift; long err; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; + atu = iommu->atu; + if (nelems == 0 || !iommu) return 0; @@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, max_seg_size = dma_get_max_seg_size(dev); seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT; - base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; + + mask = *dev->dma_mask; + if (mask <= DMA_BIT_MASK(32)) + tbl = &iommu->tbl; + else + tbl = &atu->tbl; + + base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; + for_each_sg(sglist, s, nelems, i) { unsigned long paddr, npages, entry, out_entry = 0, slen; @@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, /* Allocate iommu entries for that segment */ paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); - entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, + entry = iommu_tbl_range_alloc(dev, tbl, npages, &handle, (unsigned long)(-1), 0); /* Handle failure */ if (unlikely(entry == IOMMU_ERROR_CODE)) { - if (printk_ratelimit()) - printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" - " npages %lx\n", iommu, paddr, npages); + pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", + tbl, paddr, npages); goto iommu_map_failed; } - iommu_batch_new_entry(entry); + iommu_batch_new_entry(entry, mask); /* Convert entry to a dma_addr_t */ - dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); + dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); dma_addr |= (s->offset & ~IO_PAGE_MASK); /* Insert into HW table */ paddr &= IO_PAGE_MASK; while (npages--) { - err = iommu_batch_add(paddr); + err = iommu_batch_add(paddr, mask); if (unlikely(err < 0L)) goto iommu_map_failed; paddr += IO_PAGE_SIZE; @@ -452,7 +580,7 @@ static int 
dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, dma_next = dma_addr + slen; } - err = iommu_batch_end(); + err = iommu_batch_end(mask); if (unlikely(err < 0L)) goto iommu_map_failed; @@ -475,7 +603,7 @@ iommu_map_failed: vaddr = s->dma_address & IO_PAGE_MASK; npages = iommu_num_pages(s->dma_address, s->dma_length, IO_PAGE_SIZE); - iommu_tbl_range_free(&iommu->tbl, vaddr, npages, + iommu_tbl_range_free(tbl, vaddr, npages, IOMMU_ERROR_CODE); /* XXX demap? XXX */ s->dma_address = DMA_ERROR_CODE; @@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, struct pci_pbm_info *pbm; struct scatterlist *sg; struct iommu *iommu; + struct atu *atu; unsigned long flags, entry; + unsigned long iotsb_num; u32 devhandle; BUG_ON(direction == DMA_NONE); iommu = dev->archdata.iommu; pbm = dev->archdata.host_controller; + atu = iommu->atu; devhandle = pbm->devhandle; local_irq_save(flags); @@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, dma_addr_t dma_handle = sg->dma_address; unsigned int len = sg->dma_length; unsigned long npages; - struct iommu_map_table *tbl = &iommu->tbl; + struct iommu_map_table *tbl; unsigned long shift = IO_PAGE_SHIFT; if (!len) break; npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); + + if (dma_handle <= DMA_BIT_MASK(32)) { + iotsb_num = 0; /* we don't care for legacy iommu */ + tbl = &iommu->tbl; + } else { + iotsb_num = atu->iotsb->iotsb_num; + tbl = &atu->tbl; + } entry = ((dma_handle - tbl->table_map_base) >> shift); - dma_4v_iommu_demap(&devhandle, entry, npages); - iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, + dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, + entry, npages); + iommu_tbl_range_free(tbl, dma_handle, npages, IOMMU_ERROR_CODE); sg = sg_next(sg); } @@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, return cnt; } +static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + struct atu_iotsb *iotsb; + void *table; + u64 table_size; + u64 iotsb_num; + unsigned long order; + unsigned long err; + + iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); + if (!iotsb) { + err = -ENOMEM; + goto out_err; + } + atu->iotsb = iotsb; + + /* calculate size of IOTSB */ + table_size = (atu->size / IO_PAGE_SIZE) * 8; + order = get_order(table_size); + table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!table) { + err = -ENOMEM; + goto table_failed; + } + iotsb->table = table; + iotsb->ra = __pa(table); + iotsb->dvma_size = atu->size; + iotsb->dvma_base = atu->base; + iotsb->table_size = table_size; + iotsb->page_size = IO_PAGE_SIZE; + + /* configure and register IOTSB with HV */ + err = pci_sun4v_iotsb_conf(pbm->devhandle, + iotsb->ra, + iotsb->table_size, + iotsb->page_size, + iotsb->dvma_base, + &iotsb_num); + if (err) { + pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); + goto iotsb_conf_failed; + } + iotsb->iotsb_num = iotsb_num; + + err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); + if (err) { + pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); + goto iotsb_conf_failed; + } + + return 0; + +iotsb_conf_failed: + free_pages((unsigned long)table, order); +table_failed: + kfree(iotsb); +out_err: + return err; +} + +static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) +{ + struct atu *atu = pbm->iommu->atu; + unsigned long err; + const u64 *ranges; + u64 map_size, num_iotte; + u64 dma_mask; + const u32 *page_size; + int len; + 
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", + &len); + if (!ranges) { + pr_err(PFX "No iommu-address-ranges\n"); + return -EINVAL; + } + + page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", + NULL); + if (!page_size) { + pr_err(PFX "No iommu-pagesizes\n"); + return -EINVAL; + } + + /* There are 4 iommu-address-ranges supported. Each range is a pair of + * {base, size}. The ranges[0] and ranges[1] cover the 32bit address space + * while ranges[2] and ranges[3] cover the 64bit space. We want to use the 64bit + * address ranges to support 64bit addressing. Because 'size' for + * address ranges[2] and ranges[3] is the same, we can select either of + * ranges[2] or ranges[3] for mapping. However, because 'size' is too + * large for the OS to allocate an IOTSB, we use a fixed size of 32G + * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices + * to share. + */ + atu->ranges = (struct atu_ranges *)ranges; + atu->base = atu->ranges[3].base; + atu->size = ATU_64_SPACE_SIZE; + + /* Create IOTSB */ + err = pci_sun4v_atu_alloc_iotsb(pbm); + if (err) { + pr_err(PFX "Error creating ATU IOTSB\n"); + return err; + } + + /* Create ATU iommu map. + * One bit represents one iotte in IOTSB table. + */ + dma_mask = (roundup_pow_of_two(atu->size) - 1UL); + num_iotte = atu->size / IO_PAGE_SIZE; + map_size = num_iotte / 8; + atu->tbl.table_map_base = atu->base; + atu->dma_addr_mask = dma_mask; + atu->tbl.map = kzalloc(map_size, GFP_KERNEL); + if (!atu->tbl.map) + return -ENOMEM; + + iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, + NULL, false /* no large_pool */, + 0 /* default npools */, + false /* want span boundary checking */); + + return 0; +} + static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; @@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm, pci_sun4v_scan_bus(pbm, &op->dev); + /* If atu_init fails it is not a complete failure; + * we can still continue using the legacy iommu. + */ + if (pbm->iommu->atu) { + err = pci_sun4v_atu_init(pbm); + if (err) { + kfree(pbm->iommu->atu); + pbm->iommu->atu = NULL; + pr_err(PFX "ATU init failed, err=%d\n", err); + } + } + pbm->next = pci_pbm_root; pci_pbm_root = pbm; @@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op) struct pci_pbm_info *pbm; struct device_node *dp; struct iommu *iommu; + struct atu *atu; u32 devhandle; int i, err = -ENODEV; + static bool hv_atu = true; dp = op->dev.of_node; @@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op) pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", vpci_major, vpci_minor); + err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); + if (err) { + /* don't return an error if we fail to register the + * ATU group, but ATU hcalls won't be available. 
+ */ + hv_atu = false; + pr_err(PFX "Could not register hvapi ATU err=%d\n", + err); + } else { + pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", + vatu_major, vatu_minor); + } + dma_ops = &sun4v_dma_ops; } @@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op) } pbm->iommu = iommu; + iommu->atu = NULL; + if (hv_atu) { + atu = kzalloc(sizeof(*atu), GFP_KERNEL); + if (!atu) + pr_err(PFX "Could not allocate atu\n"); + else + iommu->atu = atu; + } err = pci_sun4v_pbm_init(pbm, op, devhandle); if (err) @@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op) return 0; out_free_iommu: + kfree(iommu->atu); kfree(pbm->iommu); out_free_controller: diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h index 5642212390b2..22603a4e48bf 100644 --- a/arch/sparc/kernel/pci_sun4v.h +++ b/arch/sparc/kernel/pci_sun4v.h @@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, unsigned long msinum, unsigned long valid); +/* Sun4v HV IOMMU v2 APIs */ +unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, + unsigned long ra, + unsigned long table_size, + unsigned long page_size, + unsigned long dvma_base, + u64 *iotsb_num); +unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, + unsigned long iotsb_num, + unsigned int pci_device); +unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index_iottes, + unsigned long io_attributes, + unsigned long io_page_list_pa, + long *mapped); +unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle, + unsigned long iotsb_num, + unsigned long iotsb_index, + unsigned long iottes, + unsigned long *demapped); #endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S index e606d46c6815..578f09657916 100644 --- a/arch/sparc/kernel/pci_sun4v_asm.S +++ b/arch/sparc/kernel/pci_sun4v_asm.S @@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid) mov %o0, %o0 ENDPROC(pci_sun4v_msg_setvalid) + /* + * %o0: devhandle + * %o1: r_addr + * %o2: size + * %o3: pagesize + * %o4: virt + * %o5: &iotsb_num/&iotsb_handle + * + * returns %o0: status + * %o1: iotsb_num/iotsb_handle + */ +ENTRY(pci_sun4v_iotsb_conf) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_CONF, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_conf) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: pci_device + * + * returns %o0: status + */ +ENTRY(pci_sun4v_iotsb_bind) + mov HV_FAST_PCI_IOTSB_BIND, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(pci_sun4v_iotsb_bind) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: index_count + * %o3: iotte_attributes + * %o4: io_page_list_p + * %o5: &mapped + * + * returns %o0: status + * %o1: #mapped + */ +ENTRY(pci_sun4v_iotsb_map) + mov %o5, %g1 + mov HV_FAST_PCI_IOTSB_MAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%g1] +ENDPROC(pci_sun4v_iotsb_map) + + /* + * %o0: devhandle + * %o1: iotsb_num/iotsb_handle + * %o2: iotsb_index + * %o3: #iottes + * %o4: &demapped + * + * returns %o0: status + * %o1: #demapped + */ +ENTRY(pci_sun4v_iotsb_demap) + mov HV_FAST_PCI_IOTSB_DEMAP, %o5 + ta HV_FAST_TRAP + retl + stx %o1, [%o4] +ENDPROC(pci_sun4v_iotsb_demap) diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index c3c12efe0bc0..9c0c8fd0b292 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) sf = (struct signal_frame __user 
*) regs->u_regs[UREG_FP]; /* 1. Make sure we are not getting garbage from the user */ - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv_and_exit; if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) @@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) synchronize_user_stack(); sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; - if (!invalid_frame_pointer(sf, sizeof(*sf))) + if (invalid_frame_pointer(sf, sizeof(*sf))) goto segv; if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 439784b7b7ac..37aa537b3ad8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -802,8 +802,10 @@ struct mdesc_mblock { }; static struct mdesc_mblock *mblocks; static int num_mblocks; +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask); -static unsigned long ra_to_pa(unsigned long addr) +static unsigned long __init ra_to_pa(unsigned long addr) { int i; @@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr) return addr; } -static int find_node(unsigned long addr) +static int __init find_node(unsigned long addr) { + static bool search_mdesc = true; + static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; + static int last_index; int i; addr = ra_to_pa(addr); @@ -830,13 +835,30 @@ static int find_node(unsigned long addr) if ((addr & p->mask) == p->val) return i; } - /* The following condition has been observed on LDOM guests.*/ - WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" - " rule. Some physical memory will be owned by node 0."); - return 0; + /* The following condition has been observed on LDOM guests because + * node_masks only contains the best latency mask and value. + * An LDOM guest's mdesc can contain a single latency group to + * cover multiple address ranges. Print a warning message only if the + * address cannot be found in either node_masks or the mdesc. + */ + if ((search_mdesc) && + ((addr & last_mem_mask.mask) != last_mem_mask.val)) { + /* find the available node in the mdesc */ + last_index = find_numa_node_for_addr(addr, &last_mem_mask); + numadbg("find_node: latency group for address 0x%lx is %d\n", + addr, last_index); + if ((last_index < 0) || (last_index >= num_node_masks)) { + /* WARN_ONCE() and use default group 0 */ + WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. 
Some physical memory will be owned by node 0."); + search_mdesc = false; + last_index = 0; + } + } + + return last_index; } -static u64 memblock_nid_range(u64 start, u64 end, int *nid) +static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) { *nid = find_node(start); start += PAGE_SIZE; @@ -1160,6 +1182,41 @@ int __node_distance(int from, int to) return numa_latency[from][to]; } +static int find_numa_node_for_addr(unsigned long pa, + struct node_mem_mask *pnode_mask) +{ + struct mdesc_handle *md = mdesc_grab(); + u64 node, arc; + int i = 0; + + node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); + if (node == MDESC_NODE_NULL) + goto out; + + mdesc_for_each_node_by_name(md, node, "group") { + mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { + u64 target = mdesc_arc_target(md, arc); + struct mdesc_mlgroup *m = find_mlgroup(target); + + if (!m) + continue; + if ((pa & m->mask) == m->match) { + if (pnode_mask) { + pnode_mask->mask = m->mask; + pnode_mask->val = m->match; + } + mdesc_release(md); + return i; + } + } + i++; + } + +out: + mdesc_release(md); + return -1; +} + static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) { int i; diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h index 6160761d5f61..4810e48dbbbf 100644 --- a/arch/tile/include/asm/cache.h +++ b/arch/tile/include/asm/cache.h @@ -61,4 +61,7 @@ */ #define __write_once __read_mostly +/* __ro_after_init is the generic name for the tile arch __write_once. */ +#define __ro_after_init __read_mostly + #endif /* _ASM_TILE_CACHE_H */ diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 178989e6d3e3..ea960d660917 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c @@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) */ unsigned long long sched_clock(void) { - return clocksource_cyc2ns(get_cycles(), - sched_clock_mult, SCHED_CLOCK_SHIFT); + return mult_frac(get_cycles(), + sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); } int setup_profiling_timer(unsigned int multiplier) diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 536ccfcc01c6..34d9e15857c3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -40,8 +40,8 @@ GCOV_PROFILE := n UBSAN_SANITIZE :=n LDFLAGS := -m elf_$(UTS_MACHINE) -ifeq ($(CONFIG_RELOCATABLE),y) -# If kernel is relocatable, build compressed kernel as PIE. +# Compressed kernel should be built as PIE since it may be loaded at any +# address by the bootloader. ifeq ($(CONFIG_X86_32),y) LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) else @@ -51,7 +51,6 @@ else LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ && echo "-z noreloc-overflow -pie --no-dynamic-linker") endif -endif LDFLAGS_vmlinux := -T hostprogs-y := mkpiggy diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c index 26240dde081e..4224ede43b4e 100644 --- a/arch/x86/boot/cpu.c +++ b/arch/x86/boot/cpu.c @@ -87,6 +87,12 @@ int validate_cpu(void) return -1; } + if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && + !has_eflag(X86_EFLAGS_ID)) { + printf("This kernel requires a CPU with the CPUID instruction. 
Build with CONFIG_M486=y to run on this CPU.\n"); + return -1; + } + if (err_flags) { puts("This kernel requires the following features " "not present on the CPU:\n"); diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index f5f4b3fbbbc2..afb222b63cae 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void) pr_cont("Fam15h "); x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; break; - + case 0x17: + pr_cont("Fam17h "); + /* + * In family 17h, there are no event constraints in the PMC hardware. + * We fall back to using the default amd_get_event_constraints. + */ + break; default: pr_err("core perfctr but no constraints; unknown hardware!\n"); return -ENODEV; diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d31735f37ed7..9d4bf3ab049e 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent frame.next_frame = 0; frame.return_address = 0; - if (!access_ok(VERIFY_READ, fp, 8)) + if (!valid_user_frame(fp, sizeof(frame))) break; bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); @@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent if (bytes != 0) break; - if (!valid_user_frame(fp, sizeof(frame))) - break; - perf_callchain_store(entry, cs_base + frame.return_address); fp = compat_ptr(ss_base + frame.next_frame); } @@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs frame.next_frame = NULL; frame.return_address = 0; - if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) + if (!valid_user_frame(fp, sizeof(frame))) break; bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); @@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs if (bytes != 0) break; - if (!valid_user_frame(fp, sizeof(frame))) - break; - perf_callchain_store(entry, frame.return_address); fp = (void __user *)frame.next_frame; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 0319311dbdbb..be202390bbd3 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event, } /* - * We use the interrupt regs as a base because the PEBS record - * does not contain a full regs set, specifically it seems to - * lack segment descriptors, which get used by things like - * user_mode(). + * We use the interrupt regs as a base because the PEBS record does not + * contain a full regs set, specifically it seems to lack segment + * descriptors, which get used by things like user_mode(). * - * In the simple case fix up only the IP and BP,SP regs, for - * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. - * A possible PERF_SAMPLE_REGS will have to transfer all regs. + * In the simple case fix up only the IP for PERF_SAMPLE_IP. + * + * We must however always use BP,SP from iregs for the unwinder to stay + * sane; the record BP,SP can point into thin air when the record is + * from a previous PMI context or an (I)RET happened between the record + * and PMI. 
*/ *regs = *iregs; regs->flags = pebs->flags; set_linear_ip(regs, pebs->ip); - regs->bp = pebs->bp; - regs->sp = pebs->sp; if (sample_type & PERF_SAMPLE_REGS_INTR) { regs->ax = pebs->ax; @@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event, regs->dx = pebs->dx; regs->si = pebs->si; regs->di = pebs->di; - regs->bp = pebs->bp; - regs->sp = pebs->sp; - regs->flags = pebs->flags; + /* + * Per the above; only set BP,SP if we don't need callchains. + * + * XXX: does this make sense? + */ + if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { + regs->bp = pebs->bp; + regs->sp = pebs->sp; + } + + /* + * Preserve PERF_EFLAGS_VM from set_linear_ip(). + */ + regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); #ifndef CONFIG_X86_32 regs->r8 = pebs->r8; regs->r9 = pebs->r9; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index efca2685d876..dbaaf7dc8373 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, */ static int uncore_pmu_event_init(struct perf_event *event); -static bool is_uncore_event(struct perf_event *event) +static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) { - return event->pmu->event_init == uncore_pmu_event_init; + return &box->pmu->pmu == event->pmu; } static int @@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, n = box->n_events; - if (is_uncore_event(leader)) { + if (is_box_event(box, leader)) { box->event_list[n] = leader; n++; } @@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, return n; list_for_each_entry(event, &leader->sibling_list, group_entry) { - if (!is_uncore_event(event) || + if (!is_box_event(box, event) || event->state <= PERF_EVENT_STATE_OFF) continue; diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 5f845eef9a4d..a3dcc12bef4a 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c @@ -8,8 +8,12 @@ #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 -#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f -#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c +#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 +#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c +#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 +#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 +#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f +#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags) snb_uncore_imc_event_start(event, 0); - box->n_events++; - return 0; } static void snb_uncore_imc_event_del(struct perf_event *event, int flags) { - struct intel_uncore_box *box = uncore_event_to_box(event); - int i; - snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); - - for (i = 0; i < box->n_events; i++) { - if (event == box->event_list[i]) { - --box->n_events; - break; - } - } } int snb_pci2phy_map_init(int devid) @@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = { static const struct pci_device_id skl_uncore_pci_ids[] = { { /* IMC */ - PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC), .driver_data = 
UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, { /* IMC */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* IMC */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, { /* end: all zeroes */ }, }; @@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ - IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ + IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */ IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ + IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */ + IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ + IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ + IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ { /* end marker */ } }; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 5874d8de1f8d..a77ee026643d 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -113,7 +113,7 @@ struct debug_store { * Per register state. 
*/ struct er_account { - raw_spinlock_t lock; /* per-core: protect structure */ + raw_spinlock_t lock; /* per-core: protect structure */ u64 config; /* extra MSR config */ u64 reg; /* extra MSR number */ atomic_t ref; /* reference count */ diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 5b6753d1f7f4..49da9f497b90 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h @@ -17,6 +17,7 @@ extern int intel_mid_pci_init(void); extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); +extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev); extern void intel_mid_pwr_power_off(void); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index b81fe2d63e15..1e81a37c034e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) #ifdef CONFIG_SMP unsigned bits; int cpu = smp_processor_id(); - unsigned int socket_id, core_complex_id; bits = c->x86_coreid_bits; /* Low order bits define the core id (index of core in socket) */ @@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) return; - socket_id = (c->apicid >> bits) - 1; - core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; - - per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; #endif } diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9bd910a7dd0a..cc9e980c68ec 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -978,6 +978,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c) } } +/* + * The physical to logical package id mapping is initialized from the + * acpi/mptables information. Make sure that CPUID actually agrees with + * that. + */ +static void sanitize_package_id(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_SMP + unsigned int pkg, apicid, cpu = smp_processor_id(); + + apicid = apic->cpu_present_to_apicid(cpu); + pkg = apicid >> boot_cpu_data.x86_coreid_bits; + + if (apicid != c->initial_apicid) { + pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n", + cpu, apicid, c->initial_apicid); + c->initial_apicid = apicid; + } + if (pkg != c->phys_proc_id) { + pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n", + cpu, pkg, c->phys_proc_id); + c->phys_proc_id = pkg; + } + c->logical_proc_id = topology_phys_to_logical_pkg(pkg); +#else + c->logical_proc_id = 0; +#endif +} + /* * This does the hard work of actually picking apart the CPU stuff... 
*/ @@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) #ifdef CONFIG_NUMA numa_add_cpu(smp_processor_id()); #endif - /* The boot/hotplug time assigment got cleared, restore it */ - c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id); + sanitize_package_id(c); } /* diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9b7cf5c28f5f..85f854b98a9d 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, for (; stack < stack_info.end; stack++) { unsigned long real_addr; int reliable = 0; - unsigned long addr = *stack; + unsigned long addr = READ_ONCE_NOCHECK(*stack); unsigned long *ret_addr_p = unwind_get_return_address_ptr(&state); diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 47004010ad5d..ebb4e95fbd74 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu) { WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ - if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { - /* FPU state will be reallocated lazily at the first use. */ - fpu__drop(fpu); - } else { - if (!fpu->fpstate_active) { - fpu__activate_curr(fpu); - user_fpu_begin(); - } + fpu__drop(fpu); + + /* + * Make sure fpstate is cleared and initialized. + */ + if (static_cpu_has(X86_FEATURE_FPU)) { + fpu__activate_curr(fpu); + user_fpu_begin(); copy_init_fpstate_to_fpregs(); } } diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index b6b2f0264af3..2dabea46f039 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS initial_pg_pmd: .fill 1024*KPMDS,4,0 #else -ENTRY(initial_page_table) +.globl initial_page_table +initial_page_table: .fill 1024,4,0 #endif initial_pg_fixmap: .fill 1024,4,0 -ENTRY(empty_zero_page) +.globl empty_zero_page +empty_zero_page: .fill 4096,1,0 -ENTRY(swapper_pg_dir) +.globl swapper_pg_dir +swapper_pg_dir: .fill 1024,4,0 EXPORT_SYMBOL(empty_zero_page) diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 764a29f84de7..85195d447a92 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c @@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si, { struct platform_device *pd; struct resource res; - unsigned long len; + u64 base, size; + u32 length; - /* don't use lfb_size as it may contain the whole VMEM instead of only - * the part that is occupied by the framebuffer */ - len = mode->height * mode->stride; - len = PAGE_ALIGN(len); - if (len > (u64)si->lfb_size << 16) { + /* + * If the 64BIT_BASE capability is set, ext_lfb_base will contain the + * upper half of the base address. Assemble the address, then make sure + * it is valid and we can actually access it. + */ + base = si->lfb_base; + if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) + base |= (u64)si->ext_lfb_base << 32; + if (!base || (u64)(resource_size_t)base != base) { + printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); + return -EINVAL; + } + + /* + * Don't use lfb_size as IORESOURCE size, since it may contain the + * entire VMEM, and thus require huge mappings. Use just the part we + * need, that is, the part where the framebuffer is located. But verify + * that it does not exceed the advertised VMEM. + * Note that in case of VBE, the lfb_size is shifted by 16 bits for + * historical reasons. 
+ */ + size = si->lfb_size; + if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) + size <<= 16; + length = mode->height * mode->stride; + length = PAGE_ALIGN(length); + if (length > size) { printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); return -EINVAL; } @@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si, memset(&res, 0, sizeof(res)); res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; res.name = simplefb_resname; - res.start = si->lfb_base; - res.end = si->lfb_base + len - 1; + res.start = base; + res.end = res.start + length - 1; if (res.end <= res.start) return -EINVAL; diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c index 2d721e533cf4..b80e8bf43cc6 100644 --- a/arch/x86/kernel/unwind_guess.c +++ b/arch/x86/kernel/unwind_guess.c @@ -7,11 +7,13 @@ unsigned long unwind_get_return_address(struct unwind_state *state) { + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); + if (unwind_done(state)) return 0; return ftrace_graph_ret_addr(state->task, &state->graph_idx, - *state->sp, state->sp); + addr, state->sp); } EXPORT_SYMBOL_GPL(unwind_get_return_address); @@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state) return false; do { + unsigned long addr = READ_ONCE_NOCHECK(*state->sp); + for (state->sp++; state->sp < info->end; state->sp++) - if (__kernel_text_address(*state->sp)) + if (__kernel_text_address(addr)) return true; state->sp = info->next_sp; diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index cbd7b92585bb..a3ce9d260d68 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) static int em_jmp_far(struct x86_emulate_ctxt *ctxt) { int rc; - unsigned short sel, old_sel; - struct desc_struct old_desc, new_desc; - const struct x86_emulate_ops *ops = ctxt->ops; + unsigned short sel; + struct desc_struct new_desc; u8 cpl = ctxt->ops->cpl(ctxt); - /* Assignment of RIP may only fail in 64-bit mode */ - if (ctxt->mode == X86EMUL_MODE_PROT64) - ops->get_segment(ctxt, &old_sel, &old_desc, NULL, - VCPU_SREG_CS); - memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, @@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) return rc; rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); - if (rc != X86EMUL_CONTINUE) { - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); - /* assigning eip failed; restore the old cs */ - ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); - return rc; - } + /* Error handling is not implemented. */ + if (rc != X86EMUL_CONTINUE) + return X86EMUL_UNHANDLEABLE; + return rc; } @@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) { int rc; unsigned long eip, cs; - u16 old_cs; int cpl = ctxt->ops->cpl(ctxt); - struct desc_struct old_desc, new_desc; - const struct x86_emulate_ops *ops = ctxt->ops; - - if (ctxt->mode == X86EMUL_MODE_PROT64) - ops->get_segment(ctxt, &old_cs, &old_desc, NULL, - VCPU_SREG_CS); + struct desc_struct new_desc; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) @@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) if (rc != X86EMUL_CONTINUE) return rc; rc = assign_eip_far(ctxt, eip, &new_desc); - if (rc != X86EMUL_CONTINUE) { - WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); - ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); - } + /* Error handling is not implemented. 
*/ + if (rc != X86EMUL_CONTINUE) + return X86EMUL_UNHANDLEABLE; + return rc; } diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 1a22de70f7f7..6e219e5c07d2 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) { ioapic->rtc_status.pending_eoi = 0; - bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); + bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); } static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index 7d2692a49657..1cc6e54436db 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h @@ -42,13 +42,13 @@ struct kvm_vcpu; struct dest_map { /* vcpu bitmap where IRQ has been sent */ - DECLARE_BITMAP(map, KVM_MAX_VCPUS); + DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); /* * Vector sent to a given vcpu, only valid when * the vcpu's bit in map is set */ - u8 vectors[KVM_MAX_VCPUS]; + u8 vectors[KVM_MAX_VCPU_ID]; }; diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 25810b144b58..6c0191615f23 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c @@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, bool line_status) { struct kvm_pic *pic = pic_irqchip(kvm); + + /* + * XXX: rejecting pic routes when pic isn't in use would be better, + * but the default routing table is installed while kvm->arch.vpic is + * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. + */ + if (!pic) + return -1; + return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); } @@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, bool line_status) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; + + if (!ioapic) + return -1; + return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, line_status); } @@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, } +static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, + struct kvm *kvm, int irq_source_id, int level, + bool line_status) +{ + if (!level) + return -1; + + return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); +} + int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) @@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm_lapic_irq irq; int r; - if (unlikely(e->type != KVM_IRQ_ROUTING_MSI)) - return -EWOULDBLOCK; + switch (e->type) { + case KVM_IRQ_ROUTING_HV_SINT: + return kvm_hv_set_sint(e, kvm, irq_source_id, level, + line_status); - if (kvm_msi_route_invalid(kvm, e)) - return -EINVAL; + case KVM_IRQ_ROUTING_MSI: + if (kvm_msi_route_invalid(kvm, e)) + return -EINVAL; - kvm_set_msi_irq(kvm, e, &irq); + kvm_set_msi_irq(kvm, e, &irq); - if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) - return r; - else - return -EWOULDBLOCK; + if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) + return r; + break; + + default: + break; + } + + return -EWOULDBLOCK; } int kvm_request_irq_source_id(struct kvm *kvm) @@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, srcu_read_unlock(&kvm->irq_srcu, idx); } -static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, - struct kvm *kvm, int irq_source_id, int level, - bool line_status) -{ - 
if (!level) - return -1; - - return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); -} - int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) @@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, srcu_read_unlock(&kvm->irq_srcu, idx); } -int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm, - int irq_source_id, int level, bool line_status) -{ - switch (irq->type) { - case KVM_IRQ_ROUTING_HV_SINT: - return kvm_hv_set_sint(irq, kvm, irq_source_id, level, - line_status); - default: - return -EWOULDBLOCK; - } -} - void kvm_arch_irq_routing_update(struct kvm *kvm) { kvm_hv_irq_routing_update(kvm); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 23b99f305382..6f69340f9fa3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, *mask = dest_id & 0xff; return true; case KVM_APIC_MODE_XAPIC_CLUSTER: - *cluster = map->xapic_cluster_map[dest_id >> 4]; + *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; *mask = dest_id & 0xf; return true; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7e30c720d0c5..073eaeabc2a7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn) struct kvm_shared_msrs *locals = container_of(urn, struct kvm_shared_msrs, urn); struct kvm_shared_msr_values *values; + unsigned long flags; + /* + * Disabling irqs at this point since the following code could be + * interrupted and executed through kvm_arch_hardware_disable() + */ + local_irq_save(flags); + if (locals->registered) { + locals->registered = false; + user_return_notifier_unregister(urn); + } + local_irq_restore(flags); for (slot = 0; slot < shared_msrs_global.nr; ++slot) { values = &locals->values[slot]; if (values->host != values->curr) { @@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) values->curr = values->host; } } - locals->registered = false; - user_return_notifier_unregister(urn); } static void shared_msr_update(unsigned slot, u32 msr) @@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) static u64 __get_kvmclock_ns(struct kvm *kvm) { - struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0); struct kvm_arch *ka = &kvm->arch; - s64 ns; + struct pvclock_vcpu_time_info hv_clock; - if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) { - u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); - ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc); - } else { - ns = ktime_get_boot_ns() + ka->kvmclock_offset; + spin_lock(&ka->pvclock_gtod_sync_lock); + if (!ka->use_master_clock) { + spin_unlock(&ka->pvclock_gtod_sync_lock); + return ktime_get_boot_ns() + ka->kvmclock_offset; } - return ns; + hv_clock.tsc_timestamp = ka->master_cycle_now; + hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; + spin_unlock(&ka->pvclock_gtod_sync_lock); + + kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, + &hv_clock.tsc_shift, + &hv_clock.tsc_to_system_mul); + return __pvclock_read_cycles(&hv_clock, rdtsc()); } u64 get_kvmclock_ns(struct kvm *kvm) @@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_PIT_STATE2: case KVM_CAP_SET_IDENTITY_MAP_ADDR: case KVM_CAP_XEN_HVM: - case KVM_CAP_ADJUST_CLOCK: case KVM_CAP_VCPU_EVENTS: case KVM_CAP_HYPERV: case KVM_CAP_HYPERV_VAPIC: 
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) #endif r = 1; break; + case KVM_CAP_ADJUST_CLOCK: + r = KVM_CLOCK_TSC_STABLE; + break; case KVM_CAP_X86_SMM: /* SMBASE is usually relocated above 1M on modern chipsets, * and SMM handlers might indeed rely on 4G segment limits, @@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, }; case KVM_SET_VAPIC_ADDR: { struct kvm_vapic_addr va; + int idx; r = -EINVAL; if (!lapic_in_kernel(vcpu)) @@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; + idx = srcu_read_lock(&vcpu->kvm->srcu); r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; } case KVM_X86_SETUP_MCE: { @@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp, struct kvm_clock_data user_ns; u64 now_ns; - now_ns = get_kvmclock_ns(kvm); + local_irq_disable(); + now_ns = __get_kvmclock_ns(kvm); user_ns.clock = now_ns; - user_ns.flags = 0; + user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; + local_irq_enable(); memset(&user_ns.pad, 0, sizeof(user_ns.pad)); r = -EFAULT; diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 79ae939970d3..fcd06f7526de 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c @@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) if (early_recursion_flag > 2) goto halt_loop; - if (regs->cs != __KERNEL_CS) + /* + * Old CPUs leave the high bits of CS on the stack + * undefined. I'm not sure which CPUs do this, but at least + * the 486 DX works this way. + */ + if ((regs->cs & 0xFFFF) != __KERNEL_CS) goto fail; /* diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index bf99aa7005eb..936a488d6cf6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void) int count = 0, pg_shift = 0; void *new_memmap = NULL; efi_status_t status; - phys_addr_t pa; + unsigned long pa; efi.systab = NULL; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 58b0f801f66f..319148bd4b05 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void) memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); } +/* + * Wrapper for slow_virt_to_phys() that handles NULL addresses. + */ +static inline phys_addr_t +virt_to_phys_or_null_size(void *va, unsigned long size) +{ + bool bad_size; + + if (!va) + return 0; + + if (virt_addr_valid(va)) + return virt_to_phys(va); + + /* + * A fully aligned variable on the stack is guaranteed not to + * cross a page boundary. Try to catch strings on the stack by + * checking that 'size' is a power of two. 
+ */ + bad_size = size > PAGE_SIZE || !is_power_of_2(size); + + WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); + + return slow_virt_to_phys(va); +} + +#define virt_to_phys_or_null(addr) \ + virt_to_phys_or_null_size((addr), sizeof(*(addr))) + int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) { unsigned long pfn, text; @@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc) spin_lock(&rtc_lock); - phys_tm = virt_to_phys(tm); - phys_tc = virt_to_phys(tc); + phys_tm = virt_to_phys_or_null(tm); + phys_tc = virt_to_phys_or_null(tc); status = efi_thunk(get_time, phys_tm, phys_tc); @@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm) spin_lock(&rtc_lock); - phys_tm = virt_to_phys(tm); + phys_tm = virt_to_phys_or_null(tm); status = efi_thunk(set_time, phys_tm); @@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, spin_lock(&rtc_lock); - phys_enabled = virt_to_phys(enabled); - phys_pending = virt_to_phys(pending); - phys_tm = virt_to_phys(tm); + phys_enabled = virt_to_phys_or_null(enabled); + phys_pending = virt_to_phys_or_null(pending); + phys_tm = virt_to_phys_or_null(tm); status = efi_thunk(get_wakeup_time, phys_enabled, phys_pending, phys_tm); @@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) spin_lock(&rtc_lock); - phys_tm = virt_to_phys(tm); + phys_tm = virt_to_phys_or_null(tm); status = efi_thunk(set_wakeup_time, enabled, phys_tm); @@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) return status; } +static unsigned long efi_name_size(efi_char16_t *name) +{ + return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1; +} static efi_status_t efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, @@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, u32 phys_name, phys_vendor, phys_attr; u32 phys_data_size, phys_data; - phys_data_size = virt_to_phys(data_size); - phys_vendor = virt_to_phys(vendor); - phys_name = virt_to_phys(name); - phys_attr = virt_to_phys(attr); - phys_data = virt_to_phys(data); + phys_data_size = virt_to_phys_or_null(data_size); + phys_vendor = virt_to_phys_or_null(vendor); + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); + phys_attr = virt_to_phys_or_null(attr); + phys_data = virt_to_phys_or_null_size(data, *data_size); status = efi_thunk(get_variable, phys_name, phys_vendor, phys_attr, phys_data_size, phys_data); @@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, u32 phys_name, phys_vendor, phys_data; efi_status_t status; - phys_name = virt_to_phys(name); - phys_vendor = virt_to_phys(vendor); - phys_data = virt_to_phys(data); + phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); + phys_vendor = virt_to_phys_or_null(vendor); + phys_data = virt_to_phys_or_null_size(data, data_size); /* If data_size is > sizeof(u32) we've got problems */ status = efi_thunk(set_variable, phys_name, phys_vendor, @@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size, efi_status_t status; u32 phys_name_size, phys_name, phys_vendor; - phys_name_size = virt_to_phys(name_size); - phys_vendor = virt_to_phys(vendor); - phys_name = virt_to_phys(name); + phys_name_size = virt_to_phys_or_null(name_size); + phys_vendor = virt_to_phys_or_null(vendor); + phys_name = virt_to_phys_or_null_size(name, *name_size); status = efi_thunk(get_next_variable, phys_name_size, phys_name, phys_vendor); @@ -621,7 
+655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count) efi_status_t status; u32 phys_count; - phys_count = virt_to_phys(count); + phys_count = virt_to_phys_or_null(count); status = efi_thunk(get_next_high_mono_count, phys_count); return status; @@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status, { u32 phys_data; - phys_data = virt_to_phys(data); + phys_data = virt_to_phys_or_null_size(data, data_size); efi_thunk(reset_system, reset_type, status, data_size, phys_data); } @@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space, if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) return EFI_UNSUPPORTED; - phys_storage = virt_to_phys(storage_space); - phys_remaining = virt_to_phys(remaining_space); - phys_max = virt_to_phys(max_variable_size); + phys_storage = virt_to_phys_or_null(storage_space); + phys_remaining = virt_to_phys_or_null(remaining_space); + phys_max = virt_to_phys_or_null(max_variable_size); status = efi_thunk(query_variable_info, attr, phys_storage, phys_remaining, phys_max); diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index 429d08be7848..dd6cfa4ad3ac 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o # MISC Devices obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o -obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o +obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c similarity index 65% rename from arch/x86/platform/intel-mid/device_libs/platform_wdt.c rename to arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index de734134bc8d..3f1f1c77d090 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -1,5 +1,5 @@ /* - * platform_wdt.c: Watchdog platform library file + * Intel Merrifield watchdog platform device library file * * (C) Copyright 2014 Intel Corporation * Author: David Cohen @@ -14,7 +14,9 @@ #include #include #include + #include +#include #include #define TANGIER_EXT_TIMER0_MSI 15 @@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = { .probe = tangier_probe, }; -static int __init register_mid_wdt(void) +static int wdt_scu_status_change(struct notifier_block *nb, + unsigned long code, void *data) { - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { - wdt_dev.dev.platform_data = &tangier_pdata; - return platform_device_register(&wdt_dev); + if (code == SCU_DOWN) { + platform_device_unregister(&wdt_dev); + return 0; } - return -ENODEV; + return platform_device_register(&wdt_dev); } +static struct notifier_block wdt_scu_notifier = { + .notifier_call = wdt_scu_status_change, +}; + +static int __init register_mid_wdt(void) +{ + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) + return -ENODEV; + + wdt_dev.dev.platform_data = &tangier_pdata; + + /* + * We need to be sure that the SCU IPC is ready before watchdog device + * can be registered: + */ + intel_scu_notifier_add(&wdt_scu_notifier); + + return 0; +} rootfs_initcall(register_mid_wdt); diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c index 
5d3b45ad1c03..67375dda451c 100644 --- a/arch/x86/platform/intel-mid/pwr.c +++ b/arch/x86/platform/intel-mid/pwr.c @@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) } EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); +pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev) +{ + struct mid_pwr *pwr = midpwr; + int id, reg, bit; + u32 power; + + if (!pwr || !pwr->available) + return PCI_UNKNOWN; + + id = intel_mid_pwr_get_lss_id(pdev); + if (id < 0) + return PCI_UNKNOWN; + + reg = (id * LSS_PWS_BITS) / 32; + bit = (id * LSS_PWS_BITS) % 32; + power = mid_pwr_get_state(pwr, reg); + return (__force pci_power_t)((power >> bit) & 3); +} + void intel_mid_pwr_power_off(void) { struct mid_pwr *pwr = midpwr; diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index ac58c1616408..555b9fa0ad43 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large KBUILD_CFLAGS += -m$(BITS) +KBUILD_CFLAGS += $(call cc-option,-fno-PIE) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index de9b14b2d348..cd400af4a6b2 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6) #define __NR_pwritev2 347 __SYSCALL(347, sys_pwritev2, 6) -#define __NR_syscall_count 348 +#define __NR_pkey_mprotect 348 +__SYSCALL(348, sys_pkey_mprotect, 4) +#define __NR_pkey_alloc 349 +__SYSCALL(349, sys_pkey_alloc, 2) +#define __NR_pkey_free 350 +__SYSCALL(350, sys_pkey_free, 1) + +#define __NR_syscall_count 351 /* * sysxtensa syscall handler diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 9a5bcd0381a7..be81e69b25bc 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c @@ -172,10 +172,11 @@ void __init time_init(void) { of_clk_init(NULL); #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT - printk("Calibrating CPU frequency "); + pr_info("Calibrating CPU frequency "); calibrate_ccount(); - printk("%d.%02d MHz\n", (int)ccount_freq/1000000, - (int)(ccount_freq/10000)%100); + pr_cont("%d.%02d MHz\n", + (int)ccount_freq / 1000000, + (int)(ccount_freq / 10000) % 100); #else ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; #endif @@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) void calibrate_delay(void) { loops_per_jiffy = ccount_freq / HZ; - printk("Calibrating delay loop (skipped)... " - "%lu.%02lu BogoMIPS preset\n", - loops_per_jiffy/(1000000/HZ), - (loops_per_jiffy/(10000/HZ)) % 100); + pr_info("Calibrating delay loop (skipped)... 
%lu.%02lu BogoMIPS preset\n", + loops_per_jiffy / (1000000 / HZ), + (loops_per_jiffy / (10000 / HZ)) % 100); } #endif diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index d02fc304b31c..ce37d5b899fe 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs) for (i = 0; i < 16; i++) { if ((i % 8) == 0) - printk(KERN_INFO "a%02d:", i); - printk(KERN_CONT " %08lx", regs->areg[i]); + pr_info("a%02d:", i); + pr_cont(" %08lx", regs->areg[i]); } - printk(KERN_CONT "\n"); - - printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", - regs->pc, regs->ps, regs->depc, regs->excvaddr); - printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", - regs->lbeg, regs->lend, regs->lcount, regs->sar); + pr_cont("\n"); + pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", + regs->pc, regs->ps, regs->depc, regs->excvaddr); + pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", + regs->lbeg, regs->lend, regs->lcount, regs->sar); if (user_mode(regs)) - printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", - regs->windowbase, regs->windowstart, regs->wmask, - regs->syscall); + pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", + regs->windowbase, regs->windowstart, regs->wmask, + regs->syscall); } static int show_trace_cb(struct stackframe *frame, void *data) { if (kernel_text_address(frame->pc)) { - printk(" [<%08lx>] ", frame->pc); - print_symbol("%s\n", frame->pc); + pr_cont(" [<%08lx>]", frame->pc); + print_symbol(" %s\n", frame->pc); } return 0; } @@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp) if (!sp) sp = stack_pointer(task); - printk("Call Trace:"); -#ifdef CONFIG_KALLSYMS - printk("\n"); -#endif + pr_info("Call Trace:\n"); walk_stackframe(sp, show_trace_cb, NULL); - printk("\n"); +#ifndef CONFIG_KALLSYMS + pr_cont("\n"); +#endif } -/* - * This routine abuses get_user()/put_user() to reference pointers - * with at least a bit of error checking ... - */ - static int kstack_depth_to_print = 24; void show_stack(struct task_struct *task, unsigned long *sp) @@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp) sp = stack_pointer(task); stack = sp; - printk("\nStack: "); + pr_info("Stack:\n"); for (i = 0; i < kstack_depth_to_print; i++) { if (kstack_end(sp)) break; - if (i && ((i % 8) == 0)) - printk("\n "); - printk("%08lx ", *sp++); + pr_cont(" %08lx", *sp++); + if (i % 8 == 7) + pr_cont("\n"); } - printk("\n"); show_trace(task, stack); } -void show_code(unsigned int *pc) -{ - long i; - - printk("\nCode:"); - - for(i = -3 ; i < 6 ; i++) { - unsigned long insn; - if (__get_user(insn, pc + i)) { - printk(" (Bad address in pc)\n"); - break; - } - printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); - } -} - DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) { static int die_counter; - int nl = 0; console_verbose(); spin_lock_irq(&die_lock); - printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); -#ifdef CONFIG_PREEMPT - printk("PREEMPT "); - nl = 1; -#endif - if (nl) - printk("\n"); + pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, + IS_ENABLED(CONFIG_PREEMPT) ? 
" PREEMPT" : ""); show_regs(regs); if (!user_mode(regs)) show_stack(NULL, (unsigned long*)regs->areg[1]); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 2d8466f9e49b..d19b09cdf284 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); - if (ctx->more) { + if (!result && !ctx->more) { + err = af_alg_wait_for_completion( + crypto_ahash_init(&ctx->req), + &ctx->completion); + if (err) + goto unlock; + } + + if (!result || ctx->more) { ctx->more = 0; err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), &ctx->completion); if (err) goto unlock; - } else if (!result) { - err = af_alg_wait_for_completion( - crypto_ahash_digest(&ctx->req), - &ctx->completion); } err = memcpy_to_msg(msg, ctx->result, len); - hash_free_result(sk, ctx); - unlock: + hash_free_result(sk, ctx); release_sock(sk); return err ?: len; diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 865f46ea724f..c80765b211cf 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) return cert; error_decode: - kfree(cert->pub->key); kfree(ctx); error_no_ctx: x509_free_certificate(cert); diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 52ce17a3dd63..c16c94f88733 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c @@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, sg = scatterwalk_ffwd(tmp, sg, start); - if (sg_page(sg) == virt_to_page(buf) && - sg->offset == offset_in_page(buf)) - return; - scatterwalk_start(&walk, sg); scatterwalk_copychunks(buf, &walk, nbytes, out); scatterwalk_done(&walk, out, 0); diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 046c4d0394ee..5fb838e592dc 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c @@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void) u32 i; /* - * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which + * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which * should be zero are indeed zero. This will workaround BIOSs that * inadvertently place values in these fields. * * The ACPI 1.0 reserved fields that will be zeroed are the bytes located * at offset 45, 55, 95, and the word located at offset 109, 110. * - * Note: The FADT revision value is unreliable because of BIOS errors. - * The table length is instead used as the final word on the version. - * - * Note: FADT revision 3 is the ACPI 2.0 version of the FADT. + * Note: The FADT revision value is unreliable. Only the length can be + * trusted. 
*/ - if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { + if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) { acpi_gbl_FADT.preferred_profile = 0; acpi_gbl_FADT.pstate_control = 0; acpi_gbl_FADT.cst_control = 0; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index deb0ff78eba8..54abb26b7366 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state) } } -static void acpi_sleep_pts_switch(u32 acpi_state) -{ - acpi_status status; - - status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state); - if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { - /* - * OS can't evaluate the _PTS object correctly. Some warning - * message will be printed. But it won't break anything. - */ - printk(KERN_NOTICE "Failure in evaluating _PTS object\n"); - } -} - -static int sleep_notify_reboot(struct notifier_block *this, +static int tts_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { acpi_sleep_tts_switch(ACPI_STATE_S5); - - acpi_sleep_pts_switch(ACPI_STATE_S5); - return NOTIFY_DONE; } -static struct notifier_block sleep_notifier = { - .notifier_call = sleep_notify_reboot, +static struct notifier_block tts_notifier = { + .notifier_call = tts_notify_reboot, .next = NULL, .priority = 0, }; @@ -916,9 +899,9 @@ int __init acpi_sleep_init(void) pr_info(PREFIX "(supports%s)\n", supported); /* - * Register the sleep_notifier to reboot notifier list so that the _TTS - * and _PTS object can also be evaluated when the system enters S5. + * Register the tts_notifier to reboot notifier list so that the _TTS + * object can also be evaluated when the system enters S5. */ - register_reboot_notifier(&sleep_notifier); + register_reboot_notifier(&tts_notifier); return 0; } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 9669fc7c19df..74f4c662f776 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1436,13 +1436,6 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports, "ahci: MRSM is on, fallback to single MSI\n"); pci_free_irq_vectors(pdev); } - - /* - * -ENOSPC indicated we don't have enough vectors. 
Don't bother - * trying a single vectors for any other error: - */ - if (nvec < 0 && nvec != -ENOSPC) - return nvec; } /* diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 9cceb4a875a5..c4eb4ae9c3aa 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1088,7 +1088,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) desc[1] = tf->command; /* status */ desc[2] = tf->device; desc[3] = tf->nsect; - desc[0] = 0; + desc[7] = 0; if (tf->flags & ATA_TFLAG_LBA48) { desc[8] |= 0x80; if (tf->hob_nsect) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 04365b17ee67..5163c8f918cb 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1403,7 +1403,8 @@ static ssize_t hot_remove_store(struct class *class, zram = idr_find(&zram_index_idr, dev_id); if (zram) { ret = zram_remove(zram); - idr_remove(&zram_index_idr, dev_id); + if (!ret) + idr_remove(&zram_index_idr, dev_id); } else { ret = -ENODEV; } diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index b49e61320952..fc9e8891eae3 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c @@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev) } static const struct of_device_id bt_bmc_match[] = { - { .compatible = "aspeed,ast2400-bt-bmc" }, + { .compatible = "aspeed,ast2400-ibt-bmc" }, { }, }; @@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver); MODULE_DEVICE_TABLE(of, bt_bmc_match); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alistair Popple "); -MODULE_DESCRIPTION("Linux device interface to the BT interface"); +MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface"); diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index f21e9b7afd1a..e3eed5a78404 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -20,7 +20,7 @@ config CLK_BCM_KONA config COMMON_CLK_IPROC bool "Broadcom iProc clock support" - depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on ARCH_BCM_IPROC || ARCH_BCM_63XX || COMPILE_TEST depends on COMMON_CLK default ARCH_BCM_IPROC help diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index edf3b96b3b73..1d99292e2039 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c @@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np) } /* register clk-provider */ - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); return; diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 0718e831475f..3b784b593afd 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np) } /* register clk-provider */ - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); return; diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index 8802a2dd56ac..f674778fb3ac 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c @@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np) hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); - of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); } CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); diff --git 
a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 79596463e0d9..fc75a335a7ce 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c @@ -143,7 +143,7 @@ static SUNXI_CCU_NKM_WITH_MUX_GATE_LOCK(pll_mipi_clk, "pll-mipi", 4, 2, /* K */ 0, 4, /* M */ 21, 0, /* mux */ - BIT(31), /* gate */ + BIT(31) | BIT(23) | BIT(22), /* gate */ BIT(28), /* lock */ CLK_SET_RATE_UNGATE); @@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = { static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", 0x050, 0, 3, axi_div_table, 0); +#define SUN6I_A31_AHB1_REG 0x054 + static const char * const ahb1_parents[] = { "osc32k", "osc24M", "axi", "pll-periph" }; @@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node) val &= BIT(16); writel(val, reg + SUN6I_A31_PLL_MIPI_REG); + /* Force AHB1 to PLL6 / 3 */ + val = readl(reg + SUN6I_A31_AHB1_REG); + /* set PLL6 pre-div = 3 */ + val &= ~GENMASK(7, 6); + val |= 0x2 << 6; + /* select PLL6 / pre-div */ + val &= ~GENMASK(13, 12); + val |= 0x3 << 12; + writel(val, reg + SUN6I_A31_AHB1_REG); + sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c index 96b40ca57697..9bd1f78a0547 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c @@ -131,7 +131,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi", 8, 4, /* N */ 4, 2, /* K */ 0, 4, /* M */ - BIT(31), /* gate */ + BIT(31) | BIT(23) | BIT(22), /* gate */ BIT(28), /* lock */ CLK_SET_RATE_UNGATE); diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 838b22aa8b67..f2c9274b8bd5 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req) else calcp = 3; - calcm = (req->parent_rate >> calcp) - 1; + calcm = (div >> calcp) - 1; req->rate = (req->parent_rate >> calcp) / (calcm + 1); req->m = calcm; diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 156aad167cd6..954a64c7757b 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str, } buf = it_page + it->offset; - len = min(tlen, it->length); + len = min_t(size_t, tlen, it->length); print_hex_dump(level, prefix_str, prefix_type, rowsize, groupsize, buf, len, ascii); tlen -= len; @@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void) if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; + /* + * Check support for AES modes not available + * on LP devices. 
+ */ + if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) + if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == + OP_ALG_AAI_XTS) + continue; + t_alg = caam_alg_alloc(alg); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 0e499bfca41c..3d94ff20fdca 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c @@ -270,8 +270,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma, if (!dax_dev->alive) return -ENXIO; - /* prevent private / writable mappings from being established */ - if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) { + /* prevent private mappings from being established */ + if ((vma->vm_flags & VM_SHARED) != VM_SHARED) { dev_info(dev, "%s: %s: fail, attempted private mapping\n", current->comm, func); return -EINVAL; diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 4a15fa5df98b..73c6ce93a0d9 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -78,7 +78,9 @@ static int dax_pmem_probe(struct device *dev) nsio = to_nd_namespace_io(&ndns->dev); /* parse the 'pfn' info block via ->rw_bytes */ - devm_nsio_enable(dev, nsio); + rc = devm_nsio_enable(dev, nsio); + if (rc) + return rc; altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); if (IS_ERR(altmap)) return PTR_ERR(altmap); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index af63a6bcf564..141aefbe37ec 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -306,6 +306,7 @@ config MMP_TDMA depends on ARCH_MMP || COMPILE_TEST select DMA_ENGINE select MMP_SRAM if ARCH_MMP + select GENERIC_ALLOCATOR help Support the MMP Two-Channel DMA engine. This engine used for MMP Audio DMA and pxa910 SQU. diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index bac5f023013b..d5ba43a87a68 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data) while (val) { u32 desc, len; + int error; + + error = pm_runtime_get(cdd->ddev.dev); + if (error < 0) + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", + __func__, error); q_num = __fls(val); val &= ~(1 << q_num); @@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data) dma_cookie_complete(&c->txd); dmaengine_desc_get_callback_invoke(&c->txd, NULL); - /* Paired with cppi41_dma_issue_pending */ pm_runtime_mark_last_busy(cdd->ddev.dev); pm_runtime_put_autosuspend(cdd->ddev.dev); } @@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) int error; error = pm_runtime_get_sync(cdd->ddev.dev); - if (error < 0) + if (error < 0) { + dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", + __func__, error); + pm_runtime_put_noidle(cdd->ddev.dev); + return error; + } dma_cookie_init(chan); dma_async_tx_descriptor_init(&c->txd, chan); @@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan) int error; error = pm_runtime_get_sync(cdd->ddev.dev); - if (error < 0) + if (error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); + return; + } WARN_ON(!list_empty(&cdd->pending)); @@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) struct cppi41_dd *cdd = c->cdd; int error; - /* PM runtime paired with dmaengine_desc_get_callback_invoke */ error = pm_runtime_get(cdd->ddev.dev); if ((error != -EINPROGRESS) && error < 0) { + pm_runtime_put_noidle(cdd->ddev.dev); dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", error); @@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) push_desc_queue(c); else 
pending_desc(c); + + pm_runtime_mark_last_busy(cdd->ddev.dev); + pm_runtime_put_autosuspend(cdd->ddev.dev); } static u32 get_host_pd0(u32 length) @@ -1059,8 +1075,8 @@ err_chans: deinit_cppi41(dev, cdd); err_init_cppi: pm_runtime_dont_use_autosuspend(dev); - pm_runtime_put_sync(dev); err_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); iounmap(cdd->usbss_mem); iounmap(cdd->ctrl_mem); @@ -1072,7 +1088,12 @@ err_get_sync: static int cppi41_dma_remove(struct platform_device *pdev) { struct cppi41_dd *cdd = platform_get_drvdata(pdev); + int error; + error = pm_runtime_get_sync(&pdev->dev); + if (error < 0) + dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", + __func__, error); of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&cdd->ddev); diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index e18a58068bca..77242b37ef87 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) if (echan->slot[0] < 0) { dev_err(dev, "Entry slot allocation failed for channel %u\n", EDMA_CHAN_SLOT(echan->ch_num)); + ret = echan->slot[0]; goto err_slot; } diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 83461994e418..a2358780ab2c 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( burst = convert_burst(8); width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); - v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | + v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | DMA_CHAN_CFG_DST_LINEAR_MODE | DMA_CHAN_CFG_SRC_LINEAR_MODE | diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d011cb89d25e..ed37e5908b91 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -22,10 +22,6 @@ menuconfig GPIOLIB if GPIOLIB -config GPIO_DEVRES - def_bool y - depends on HAS_IOMEM - config OF_GPIO def_bool y depends on OF diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index ab28a2daeacc..d074c2299393 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -2,7 +2,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG -obj-$(CONFIG_GPIO_DEVRES) += devres.o +obj-$(CONFIG_GPIOLIB) += devres.o obj-$(CONFIG_GPIOLIB) += gpiolib.o obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o obj-$(CONFIG_OF_GPIO) += gpiolib-of.o diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index e422568e14ad..fe731f094257 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc, bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); - memcpy(reg_val, chip->reg_output, NBANK(chip)); mutex_lock(&chip->i2c_lock); + memcpy(reg_val, chip->reg_output, NBANK(chip)); for (bank = 0; bank < NBANK(chip); bank++) { bank_mask = mask[bank / sizeof(*mask)] >> ((bank % sizeof(*mask)) * 8); if (bank_mask) { bank_val = bits[bank / sizeof(*bits)] >> ((bank % sizeof(*bits)) * 8); + bank_val &= bank_mask; reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; } } @@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, if (client->irq && irq_base != -1 && (chip->driver_data & PCA_INT)) { - ret = pca953x_read_regs(chip, chip->regs->input, chip->irq_stat); if (ret) diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 5a5a6cb00eea..d6e21f1a70a9 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -97,7 +97,7 @@ 
static int tc3589x_gpio_get_direction(struct gpio_chip *chip, if (ret < 0) return ret; - return !!(ret & BIT(pos)); + return !(ret & BIT(pos)); } static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip, diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 93ed0e00c578..868128a676ba 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2737,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) if (IS_ERR(desc)) return PTR_ERR(desc); - /* Flush direction if something changed behind our back */ - if (chip->get_direction) { + /* + * If it's fast: flush the direction setting if something changed + * behind our back + */ + if (!chip->can_sleep && chip->get_direction) { int dir = chip->get_direction(chip, offset); if (dir) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c2b8496cdf63..121a034fe27d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -391,6 +391,7 @@ struct amdgpu_bo { u64 metadata_flags; void *metadata; u32 metadata_size; + unsigned prime_shared_count; /* list of all virtual address to which this bo * is associated to */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index dae35a96a694..6c343a933182 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -34,6 +34,7 @@ struct amdgpu_atpx { static struct amdgpu_atpx_priv { bool atpx_detected; + bool bridge_pm_usable; /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle other_handle; @@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); - atpx->functions.power_cntl = false; + /* + * Disable legacy PM methods only when pcie port PM is usable, + * otherwise the device might fail to power off or power on. 
+ */ + atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; atpx->is_hybrid = true; } @@ -555,17 +560,25 @@ static bool amdgpu_atpx_detect(void) struct pci_dev *pdev = NULL; bool has_atpx = false; int vga_count = 0; + bool d3_supported = false; + struct pci_dev *parent_pdev; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; } if (has_atpx && vga_count == 2) { @@ -573,6 +586,7 @@ static bool amdgpu_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; + amdgpu_atpx_priv.bridge_pm_usable = d3_supported; amdgpu_atpx_init(); return true; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 651115dcce12..c02db01f6583 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, entry->priority = min(info[i].bo_priority, AMDGPU_BO_LIST_MAX_PRIORITY); entry->tv.bo = &entry->robj->tbo; - entry->tv.shared = true; + entry->tv.shared = !entry->robj->prime_shared_count; if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) gds_obj = entry->robj; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9e16e975f31a..deee2db36fce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -636,12 +636,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) return false; if (amdgpu_passthrough(adev)) { - /* for FIJI: In whole GPU pass-through virtualization case - * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) - * so amdgpu_card_posted return false and driver will incorrectly skip vPost. - * but if we force vPost do in pass-through case, the driver reload will hang. - * whether doing vPost depends on amdgpu_card_posted if smc version is above - * 00160e00 for FIJI. + /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot + * some old smc fw still need driver do vPost otherwise gpu hang, while + * those smc fw version above 22.15 doesn't have this flaw, so we force + * vpost executed for smc version below 22.15 */ if (adev->asic_type == CHIP_FIJI) { int err; @@ -652,22 +650,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) return true; fw_ver = *((uint32_t *)adev->pm.fw->data + 69); - if (fw_ver >= 0x00160e00) - return !amdgpu_card_posted(adev); + if (fw_ver < 0x00160e00) + return true; } - } else { - /* in bare-metal case, amdgpu_card_posted return false - * after system reboot/boot, and return true if driver - * reloaded. - * we shouldn't do vPost after driver reload otherwise GPU - * could hang. 
- */ - if (amdgpu_card_posted(adev)) - return false; } - - /* we assume vPost is neede for all other cases */ - return true; + return !amdgpu_card_posted(adev); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 7700dc22f243..3826d5aea0a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (ret) return ERR_PTR(ret); + bo->prime_shared_count = 1; return &bo->gem_base; } int amdgpu_gem_prime_pin(struct drm_gem_object *obj) { struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int ret = 0; + long ret = 0; ret = amdgpu_bo_reserve(bo, false); if (unlikely(ret != 0)) return ret; + /* + * Wait for all shared fences to complete before we switch to future + * use of exclusive fence on this prime shared bo. + */ + ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, + MAX_SCHEDULE_TIMEOUT); + if (unlikely(ret < 0)) { + DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); + amdgpu_bo_unreserve(bo); + return ret; + } + /* pin buffer into GTT */ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); + if (likely(ret == 0)) + bo->prime_shared_count++; + amdgpu_bo_unreserve(bo); return ret; } @@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) return; amdgpu_bo_unpin(bo); + if (bo->prime_shared_count) + bo->prime_shared_count--; amdgpu_bo_unreserve(bo); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 28e748d688e2..85621a77335d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1483,8 +1483,6 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) table_info->vddgfx_lookup_table, vv_id, &sclk)) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ClockStretcher)) { - if (table_info == NULL) - return -EINVAL; sclk_table = table_info->vdd_dep_on_sclk; for (j = 1; j < sclk_table->count; j++) { @@ -3000,19 +2998,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) data->highest_mclk = memory_clock; - performance_level = &(ps->performance_levels - [ps->performance_level_count++]); - PP_ASSERT_WITH_CODE( (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), "Performance levels exceeds SMC limit!", return -EINVAL); PP_ASSERT_WITH_CODE( - (ps->performance_level_count <= + (ps->performance_level_count < hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), - "Performance levels exceeds Driver limit!", - return -EINVAL); + "Performance levels exceeds Driver limit, Skip!", + return 0); + + performance_level = &(ps->performance_levels + [ps->performance_level_count++]); /* Performance levels are arranged from low to high. 
*/ performance_level->memory_clock = memory_clock; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c index 006b22071685..8db8e209d915 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c @@ -2214,6 +2214,7 @@ uint32_t polaris10_get_mac_definition(uint32_t value) int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) { struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); uint32_t tmp; int result; bool error = false; @@ -2233,8 +2234,10 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr) offsetof(SMU74_Firmware_Header, SoftRegisters), &tmp, SMC_RAM_END); - if (!result) + if (!result) { + data->soft_regs_start = tmp; smu_data->smu7_data.soft_regs_start = tmp; + } error |= (0 != result); diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index b7a8b2ac4055..b69c66b4897e 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -14,170 +14,45 @@ * */ -#include +#include #include -#include #include "arcpgu.h" -struct arcpgu_drm_connector { - struct drm_connector connector; - struct drm_encoder_slave *encoder_slave; -}; - -static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) -{ - const struct drm_encoder_slave_funcs *sfuncs; - struct drm_encoder_slave *slave; - struct arcpgu_drm_connector *con = - container_of(connector, struct arcpgu_drm_connector, connector); - - slave = con->encoder_slave; - if (slave == NULL) { - dev_err(connector->dev->dev, - "connector_get_modes: cannot find slave encoder for connector\n"); - return 0; - } - - sfuncs = slave->slave_funcs; - if (sfuncs->get_modes == NULL) - return 0; - - return sfuncs->get_modes(&slave->base, connector); -} - -static enum drm_connector_status -arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) -{ - enum drm_connector_status status = connector_status_unknown; - const struct drm_encoder_slave_funcs *sfuncs; - struct drm_encoder_slave *slave; - - struct arcpgu_drm_connector *con = - container_of(connector, struct arcpgu_drm_connector, connector); - - slave = con->encoder_slave; - if (slave == NULL) { - dev_err(connector->dev->dev, - "connector_detect: cannot find slave encoder for connector\n"); - return status; - } - - sfuncs = slave->slave_funcs; - if (sfuncs && sfuncs->detect) - return sfuncs->detect(&slave->base, connector); - - dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n"); - return status; -} - -static void arcpgu_drm_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_helper_funcs -arcpgu_drm_connector_helper_funcs = { - .get_modes = arcpgu_drm_connector_get_modes, -}; - -static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .reset = drm_atomic_helper_connector_reset, - .detect = arcpgu_drm_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = arcpgu_drm_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = { - .dpms = drm_i2c_encoder_dpms, - 
.mode_fixup = drm_i2c_encoder_mode_fixup, - .mode_set = drm_i2c_encoder_mode_set, - .prepare = drm_i2c_encoder_prepare, - .commit = drm_i2c_encoder_commit, - .detect = drm_i2c_encoder_detect, -}; - static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { .destroy = drm_encoder_cleanup, }; int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) { - struct arcpgu_drm_connector *arcpgu_connector; - struct drm_i2c_encoder_driver *driver; - struct drm_encoder_slave *encoder; - struct drm_connector *connector; - struct i2c_client *i2c_slave; - int ret; + struct drm_encoder *encoder; + struct drm_bridge *bridge; + + int ret = 0; encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); if (encoder == NULL) return -ENOMEM; - i2c_slave = of_find_i2c_device_by_node(np); - if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { - dev_err(drm->dev, "failed to find i2c slave encoder\n"); + /* Locate drm bridge from the hdmi encoder DT node */ + bridge = of_drm_find_bridge(np); + if (!bridge) return -EPROBE_DEFER; - } - if (i2c_slave->dev.driver == NULL) { - dev_err(drm->dev, "failed to find i2c slave driver\n"); - return -EPROBE_DEFER; - } - - driver = - to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); - ret = driver->encoder_init(i2c_slave, drm, encoder); - if (ret) { - dev_err(drm->dev, "failed to initialize i2c encoder slave\n"); - return ret; - } - - encoder->base.possible_crtcs = 1; - encoder->base.possible_clones = 0; - ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, + encoder->possible_crtcs = 1; + encoder->possible_clones = 0; + ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); if (ret) return ret; - drm_encoder_helper_add(&encoder->base, - &arcpgu_drm_encoder_helper_funcs); + /* Link drm_bridge to encoder */ + bridge->encoder = encoder; + encoder->bridge = bridge; - arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), - GFP_KERNEL); - if (!arcpgu_connector) { - ret = -ENOMEM; - goto error_encoder_cleanup; - } + ret = drm_bridge_attach(drm, bridge); + if (ret) + drm_encoder_cleanup(encoder); - connector = &arcpgu_connector->connector; - drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); - ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - if (ret < 0) { - dev_err(drm->dev, "failed to initialize drm connector\n"); - goto error_encoder_cleanup; - } - - ret = drm_mode_connector_attach_encoder(connector, &encoder->base); - if (ret < 0) { - dev_err(drm->dev, "could not attach connector to encoder\n"); - drm_connector_unregister(connector); - goto error_connector_cleanup; - } - - arcpgu_connector->encoder_slave = encoder; - - return 0; - -error_connector_cleanup: - drm_connector_cleanup(connector); - -error_encoder_cleanup: - drm_encoder_cleanup(&encoder->base); return ret; } diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index bbaa55add2d2..7d4e5aa77195 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) clk_prepare_enable(hdlcd->clk); hdlcd_crtc_mode_set_nofb(crtc); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); + drm_crtc_vblank_on(crtc); } static void hdlcd_crtc_disable(struct drm_crtc *crtc) { struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); - if (!crtc->state->active) - return; - + drm_crtc_vblank_off(crtc); hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); 
clk_disable_unprepare(hdlcd->clk); } diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index faab7f9bd3b7..e5f4f4a6546d 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -378,7 +378,6 @@ err_register: } err_fbdev: drm_kms_helper_poll_fini(drm); - drm_mode_config_cleanup(drm); drm_vblank_cleanup(drm); err_vblank: pm_runtime_disable(drm->dev); @@ -388,6 +387,7 @@ err_unload: drm_irq_uninstall(drm); of_reserved_mem_device_release(drm->dev); err_free: + drm_mode_config_cleanup(drm); dev_set_drvdata(dev, NULL); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 0ad2c47f808f..71c3473476c7 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -254,10 +254,12 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value = dev->mode_config.async_page_flip; break; case DRM_CAP_PAGE_FLIP_TARGET: - req->value = 1; - drm_for_each_crtc(crtc, dev) { - if (!crtc->funcs->page_flip_target) - req->value = 0; + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + req->value = 1; + drm_for_each_crtc(crtc, dev) { + if (!crtc->funcs->page_flip_target) + req->value = 0; + } } break; case DRM_CAP_CURSOR_WIDTH: diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index e8fb6ef947ee..38eaa63afb31 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1907,6 +1907,8 @@ err_disable_pm_runtime: err_hdmiphy: if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); + if (hdata->regs_hdmiphy) + iounmap(hdata->regs_hdmiphy); err_ddc: put_device(&hdata->ddc_adpt->dev); @@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev) if (hdata->hdmiphy_port) put_device(&hdata->hdmiphy_port->dev); + if (hdata->regs_hdmiphy) + iounmap(hdata->regs_hdmiphy); + put_device(&hdata->ddc_adpt->dev); return 0; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index b2d5e188b1b8..deb57435cc89 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -25,8 +25,13 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { + struct drm_device *dev = crtc->dev; + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; struct drm_pending_vblank_event *event = crtc->state->event; + regmap_write(fsl_dev->regmap, + DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); + if (event) { crtc->state->event = NULL; @@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, } } -static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) +static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) { struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + /* always disable planes on the CRTC */ + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); + drm_crtc_vblank_off(crtc); regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, @@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) } static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { + .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, - .disable = fsl_dcu_drm_disable_crtc, .enable = fsl_dcu_drm_crtc_enable, .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, }; diff --git 
a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0b0d1cb11641..320e4728c9b9 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev) regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); return ret; } @@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) drm_handle_vblank(dev, 0); regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 9e6f7d8112b3..a99f48847420 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, DCU_LAYER_POST_SKIP(0) | DCU_LAYER_PRE_SKIP(0)); } - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); - regmap_write(fsl_dev->regmap, - DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); return; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b7a7ed82c325..962aae631f13 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12213,7 +12213,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error); if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) { ret = -EIO; - goto cleanup; + goto unlock; } atomic_inc(&intel_crtc->unpin_work_count); @@ -12301,6 +12301,7 @@ cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state->rotation); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); +unlock: mutex_unlock(&dev->struct_mutex); cleanup: crtc->primary->fb = old_fb; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 019b7ca392d7..c70310206ac5 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp, ddp_comp); priv->crtc = crtc; + writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); } @@ -250,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) if (irq < 0) return irq; - ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, - IRQF_TRIGGER_NONE, dev_name(dev), priv); - if (ret < 0) { - dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); - return ret; - } - comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); if (comp_id < 0) { dev_err(dev, "Failed to identify by alias: %d\n", comp_id); @@ -272,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); + ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, + IRQF_TRIGGER_NONE, dev_name(dev), priv); + if (ret < 0) { + dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); + return ret; + } + ret = component_add(dev, &mtk_disp_ovl_component_ops); if (ret) dev_err(dev, "Failed to add component: %d\n", ret); diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 0186e500d2a5..90fb831ef031 100644 --- 
a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, unsigned long pll_rate; unsigned int factor; + /* let pll_rate can fix the valid range of tvdpll (1G~2GHz) */ pix_rate = 1000UL * mode->clock; - if (mode->clock <= 74000) + if (mode->clock <= 27000) + factor = 16 * 3; + else if (mode->clock <= 84000) factor = 8 * 3; - else + else if (mode->clock <= 167000) factor = 4 * 3; + else + factor = 2 * 3; pll_rate = pix_rate * factor; dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index df33b3ca6ffd..48cc01fd20c7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int bpc) { writel(w << 16 | h, comp->regs + DISP_OD_SIZE); - writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE); + writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); mtk_dither_set(comp, bpc, DISP_OD_CFG); } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 28b2044ed9f2..eaa5a2240c0c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -86,7 +86,7 @@ #define DSI_PHY_TIMECON0 0x110 #define LPX (0xff << 0) -#define HS_PRPR (0xff << 8) +#define HS_PREP (0xff << 8) #define HS_ZERO (0xff << 16) #define HS_TRAIL (0xff << 24) @@ -102,10 +102,16 @@ #define CLK_TRAIL (0xff << 24) #define DSI_PHY_TIMECON3 0x11c -#define CLK_HS_PRPR (0xff << 0) +#define CLK_HS_PREP (0xff << 0) #define CLK_HS_POST (0xff << 8) #define CLK_HS_EXIT (0xff << 16) +#define T_LPX 5 +#define T_HS_PREP 6 +#define T_HS_TRAIL 8 +#define T_HS_EXIT 7 +#define T_HS_ZERO 10 + #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) struct phy; @@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) static void dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; - unsigned int ui, cycle_time; - unsigned int lpx; + u32 ui, cycle_time; ui = 1000 / dsi->data_rate + 0x01; cycle_time = 8000 / dsi->data_rate + 0x01; - lpx = 5; - timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx; - timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 | - (4 * lpx); + timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24; + timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 | + T_HS_EXIT << 24; timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | (NS_TO_CYCLE(0x150, cycle_time) << 16); - timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 | - NS_TO_CYCLE(0x40, cycle_time); + timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 | + NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8; writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); @@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) { struct device *dev = dsi->dev; int ret; + u64 pixel_clock, total_bits; + u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; if (++dsi->refcount != 1) return 0; - /** - * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio; - * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000. - * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi. - * we set mipi_ratio is 1.05. 
- */ - dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10); + switch (dsi->format) { + case MIPI_DSI_FMT_RGB565: + bit_per_pixel = 16; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + bit_per_pixel = 18; + break; + case MIPI_DSI_FMT_RGB666: + case MIPI_DSI_FMT_RGB888: + default: + bit_per_pixel = 24; + break; + } - ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000); + /** + * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000 + * htotal_time = htotal * byte_per_pixel / num_lanes + * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit + * mipi_ratio = (htotal_time + overhead_time) / htotal_time + * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; + */ + pixel_clock = dsi->vm.pixelclock * 1000; + htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + + dsi->vm.hsync_len; + htotal_bits = htotal * bit_per_pixel; + + overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + + T_HS_EXIT; + overhead_bits = overhead_cycles * dsi->lanes * 8; + total_bits = htotal_bits + overhead_bits; + + dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, + htotal * dsi->lanes); + + ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); if (ret < 0) { dev_err(dev, "Failed to set data rate: %d\n", ret); goto err_refcount; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 71227deef21b..0e8c4d9af340 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi, phy_power_on(hdmi->phy); mtk_hdmi_aud_output_config(hdmi, mode); - mtk_hdmi_setup_audio_infoframe(hdmi); - mtk_hdmi_setup_avi_infoframe(hdmi, mode); - mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); - if (mode->flags & DRM_MODE_FLAG_3D_MASK) - mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); - mtk_hdmi_hw_vid_black(hdmi, false); mtk_hdmi_hw_aud_unmute(hdmi); mtk_hdmi_hw_send_av_unmute(hdmi); @@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge) hdmi->powered = true; } +static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, + struct drm_display_mode *mode) +{ + mtk_hdmi_setup_audio_infoframe(hdmi); + mtk_hdmi_setup_avi_infoframe(hdmi, mode); + mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); + if (mode->flags & DRM_MODE_FLAG_3D_MASK) + mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); +} + static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); @@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); phy_power_on(hdmi->phy); + mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); hdmi->enabled = true; } diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index 8a24754b440f..51cb9cfb6646 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c @@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); unsigned int pre_div; unsigned int div; + unsigned int pre_ibias; + unsigned int hdmi_ibias; + unsigned int imp_en; dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, rate, parent_rate); @@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct 
clk_hw *hw, unsigned long rate, (0x1 << PLL_BR_SHIFT), RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | RG_HDMITX_PLL_BR); - mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); + if (rate < 165000000) { + mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, + RG_HDMITX_PRD_IMP_EN); + pre_ibias = 0x3; + imp_en = 0x0; + hdmi_ibias = hdmi_phy->ibias; + } else { + mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, + RG_HDMITX_PRD_IMP_EN); + pre_ibias = 0x6; + imp_en = 0xf; + hdmi_ibias = hdmi_phy->ibias_up; + } mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, - (0x3 << PRD_IBIAS_CLK_SHIFT) | - (0x3 << PRD_IBIAS_D2_SHIFT) | - (0x3 << PRD_IBIAS_D1_SHIFT) | - (0x3 << PRD_IBIAS_D0_SHIFT), + (pre_ibias << PRD_IBIAS_CLK_SHIFT) | + (pre_ibias << PRD_IBIAS_D2_SHIFT) | + (pre_ibias << PRD_IBIAS_D1_SHIFT) | + (pre_ibias << PRD_IBIAS_D0_SHIFT), RG_HDMITX_PRD_IBIAS_CLK | RG_HDMITX_PRD_IBIAS_D2 | RG_HDMITX_PRD_IBIAS_D1 | RG_HDMITX_PRD_IBIAS_D0); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, - (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); + (imp_en << DRV_IMP_EN_SHIFT), + RG_HDMITX_DRV_IMP_EN); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | @@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, - (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | - (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | - (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | - (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), - RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | - RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); + (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | + (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | + (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | + (hdmi_ibias << DRV_IBIAS_D0_SHIFT), + RG_HDMITX_DRV_IBIAS_CLK | + RG_HDMITX_DRV_IBIAS_D2 | + RG_HDMITX_DRV_IBIAS_D1 | + RG_HDMITX_DRV_IBIAS_D0); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 2fdcd04bc93f..0ae13cd2adda 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -34,6 +34,7 @@ struct radeon_atpx { static struct radeon_atpx_priv { bool atpx_detected; + bool bridge_pm_usable; /* handle for device - and atpx */ acpi_handle dhandle; struct radeon_atpx atpx; @@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) atpx->is_hybrid = false; if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { printk("ATPX Hybrid Graphics\n"); - atpx->functions.power_cntl = false; + /* + * Disable legacy PM methods only when pcie port PM is usable, + * otherwise the device might fail to power off or power on. 
+ */ + atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable; atpx->is_hybrid = true; } @@ -548,11 +553,16 @@ static bool radeon_atpx_detect(void) struct pci_dev *pdev = NULL; bool has_atpx = false; int vga_count = 0; + bool d3_supported = false; + struct pci_dev *parent_pdev; while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; } /* some newer PX laptops mark the dGPU as a non-VGA display device */ @@ -560,6 +570,9 @@ static bool radeon_atpx_detect(void) vga_count++; has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); + + parent_pdev = pci_upstream_bridge(pdev); + d3_supported |= parent_pdev && parent_pdev->bridge_d3; } if (has_atpx && vga_count == 2) { @@ -567,6 +580,7 @@ static bool radeon_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; + radeon_atpx_priv.bridge_pm_usable = d3_supported; radeon_atpx_init(); return true; } diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 2891aa914cfa..4ce665349f6b 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -141,9 +141,9 @@ static int sun4i_drv_bind(struct device *dev) /* Create our layers */ drv->layers = sun4i_layers_init(drm); - if (!drv->layers) { + if (IS_ERR(drv->layers)) { dev_err(drm->dev, "Couldn't create the planes\n"); - ret = -EINVAL; + ret = PTR_ERR(drv->layers); goto free_drm; } diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index c3ff10f559cc..d198ad7e5323 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Enabling RGB output\n"); - if (!IS_ERR(tcon->panel)) { + if (!IS_ERR(tcon->panel)) drm_panel_prepare(tcon->panel); - drm_panel_enable(tcon->panel); - } - - /* encoder->bridge can be NULL; drm_bridge_enable checks for it */ - drm_bridge_enable(encoder->bridge); sun4i_tcon_channel_enable(tcon, 0); + + if (!IS_ERR(tcon->panel)) + drm_panel_enable(tcon->panel); } static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) @@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) DRM_DEBUG_DRIVER("Disabling RGB output\n"); + if (!IS_ERR(tcon->panel)) + drm_panel_disable(tcon->panel); + sun4i_tcon_channel_disable(tcon, 0); - /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ - drm_bridge_disable(encoder->bridge); - - if (!IS_ERR(tcon->panel)) { - drm_panel_disable(tcon->panel); + if (!IS_ERR(tcon->panel)) drm_panel_unprepare(tcon->panel); - } } static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 086d8a507157..60d30203a5fa 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -32,6 +32,11 @@ #include #include "hid-ids.h" +#define CP2112_REPORT_MAX_LENGTH 64 +#define CP2112_GPIO_CONFIG_LENGTH 5 +#define CP2112_GPIO_GET_LENGTH 2 +#define CP2112_GPIO_SET_LENGTH 3 + enum { CP2112_GPIO_CONFIG = 0x02, CP2112_GPIO_GET = 0x03, @@ -161,6 +166,8 @@ struct cp2112_device { atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; + u8 *in_out_buffer; + spinlock_t lock; }; static int gpio_push_pull = 0xFF; @@ -171,62 +178,86 @@ 
static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; - u8 buf[5]; + u8 *buf = dev->in_out_buffer; + unsigned long flags; int ret; + spin_lock_irqsave(&dev->lock, flags); + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, - sizeof(buf), HID_FEATURE_REPORT, - HID_REQ_GET_REPORT); - if (ret != sizeof(buf)) { + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); - return ret; + goto exit; } buf[1] &= ~(1 << offset); buf[2] = gpio_push_pull; - ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); - return ret; + goto exit; } - return 0; + ret = 0; + +exit: + spin_unlock_irqrestore(&dev->lock, flags); + return ret <= 0 ? ret : -EIO; } static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; - u8 buf[3]; + u8 *buf = dev->in_out_buffer; + unsigned long flags; int ret; + spin_lock_irqsave(&dev->lock, flags); + buf[0] = CP2112_GPIO_SET; buf[1] = value ? 0xff : 0; buf[2] = 1 << offset; - ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, + CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); + + spin_unlock_irqrestore(&dev->lock, flags); } static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; - u8 buf[2]; + u8 *buf = dev->in_out_buffer; + unsigned long flags; int ret; - ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_GET_REPORT); - if (ret != sizeof(buf)) { + spin_lock_irqsave(&dev->lock, flags); + + ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, + CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); - return ret; + ret = ret < 0 ? 
ret : -EIO; + goto exit; } - return (buf[1] >> offset) & 1; + ret = (buf[1] >> offset) & 1; + +exit: + spin_unlock_irqrestore(&dev->lock, flags); + + return ret; } static int cp2112_gpio_direction_output(struct gpio_chip *chip, @@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; - u8 buf[5]; + u8 *buf = dev->in_out_buffer; + unsigned long flags; int ret; + spin_lock_irqsave(&dev->lock, flags); + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, - sizeof(buf), HID_FEATURE_REPORT, - HID_REQ_GET_REPORT); - if (ret != sizeof(buf)) { + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, + HID_REQ_GET_REPORT); + if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); - return ret; + goto fail; } buf[1] |= 1 << offset; buf[2] = gpio_push_pull; - ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), - HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, + CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, + HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); - return ret; + goto fail; } + spin_unlock_irqrestore(&dev->lock, flags); + /* * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 4.4 @@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, cp2112_gpio_set(chip, offset, value); return 0; + +fail: + spin_unlock_irqrestore(&dev->lock, flags); + return ret < 0 ? ret : -EIO; } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, @@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) struct cp2112_smbus_config_report config; int ret; + dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, + GFP_KERNEL); + if (!dev->in_out_buffer) + return -ENOMEM; + + spin_lock_init(&dev->lock); + ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); @@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err_power_normal; } - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - ret = -ENOMEM; - goto err_power_normal; - } - hid_set_drvdata(hdev, (void *)dev); dev->hdev = hdev; dev->adap.owner = THIS_MODULE; @@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret) { hid_err(hdev, "error registering i2c adapter\n"); - goto err_free_dev; + goto err_power_normal; } hid_dbg(hdev, "adapter registered\n"); @@ -1123,8 +1169,6 @@ err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: i2c_del_adapter(&dev->adap); -err_free_dev: - kfree(dev); err_power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); err_hid_close: @@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev) */ hid_hw_close(hdev); hid_hw_stop(hdev); - kfree(dev); } static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 76f644deb0a7..c5c5fbe9d605 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) /* Setup wireless link with Logitech Wii wheel */ if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { - unsigned char 
buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); - ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), + if (!buf) { + ret = -ENOMEM; + goto err_free; + } + + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); - if (ret >= 0) { /* insert a little delay of 10 jiffies ~ 40ms */ wait_queue_head_t wait; @@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) buf[1] = 0xB2; get_random_bytes(&buf[2], 2); - ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); } + kfree(buf); } if (drv_data->quirks & LG_FF) diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index d6fa496d0ca2..20b40ad26325 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev, static int magicmouse_probe(struct hid_device *hdev, const struct hid_device_id *id) { - __u8 feature[] = { 0xd7, 0x01 }; + const u8 feature[] = { 0xd7, 0x01 }; + u8 *buf; struct magicmouse_sc *msc; struct hid_report *report; int ret; @@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev, } report->size = 6; + buf = kmemdup(feature, sizeof(feature), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto err_stop_hw; + } + /* * Some devices repond with 'invalid report id' when feature * report switching it into multitouch mode is sent to it. @@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev, * but there seems to be no other way of switching the mode. * Thus the super-ugly hacky success check below. 
*/ - ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), + ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + kfree(buf); if (ret != -EIO && ret != sizeof(feature)) { hid_err(hdev, "unable to request touch data (%d)\n", ret); goto err_stop_hw; diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 9cd2ca34a6be..be89bcbf6a71 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c @@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page) static int rmi_set_mode(struct hid_device *hdev, u8 mode) { int ret; - u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; + const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; + u8 *buf; - ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, + buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf, sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + kfree(buf); if (ret < 0) { dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, ret); diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index c5c3d6111729..60875625cbdf 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, __s32 value; int ret = 0; + memset(buffer, 0, buffer_size); mutex_lock(&data->mutex); report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); if (!report || (field_index >= report->maxfield)) { diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index d223650a97e4..11edabf425ae 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -59,7 +59,6 @@ config I2C_CHARDEV config I2C_MUX tristate "I2C bus multiplexing support" - depends on HAS_IOMEM help Say Y here if you want the I2C core to support the ability to handle multiplexed I2C bus topologies, by presenting each diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 11e866d05368..b403fa5ecf49 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -91,9 +91,7 @@ DW_IC_INTR_TX_ABRT | \ DW_IC_INTR_STOP_DET) -#define DW_IC_STATUS_ACTIVITY 0x1 -#define DW_IC_STATUS_TFE BIT(2) -#define DW_IC_STATUS_MST_ACTIVITY BIT(5) +#define DW_IC_STATUS_ACTIVITY 0x1 #define DW_IC_SDA_HOLD_RX_SHIFT 16 #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) @@ -478,25 +476,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; u32 ic_tar = 0; - bool enabled; - enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1; - - if (enabled) { - u32 ic_status; - - /* - * Only disable adapter if ic_tar and ic_con can't be - * dynamically updated - */ - ic_status = dw_readl(dev, DW_IC_STATUS); - if (!dev->dynamic_tar_update_enabled || - (ic_status & DW_IC_STATUS_MST_ACTIVITY) || - !(ic_status & DW_IC_STATUS_TFE)) { - __i2c_dw_enable_and_wait(dev, false); - enabled = false; - } - } + /* Disable the adapter */ + __i2c_dw_enable_and_wait(dev, false); /* if the slave address is ten bit address, enable 10BITADDR */ if (dev->dynamic_tar_update_enabled) { @@ -526,8 +508,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* enforce disabled interrupts (due to HW issues) */ i2c_dw_disable_int(dev); - if (!enabled) - __i2c_dw_enable(dev, true); + /* Enable the adapter */ + __i2c_dw_enable(dev, true); /* Clear and enable interrupts */ 
dw_readl(dev, DW_IC_CLR_INTR); @@ -611,7 +593,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { /* avoid rx buffer overrun */ - if (rx_limit - dev->rx_outstanding <= 0) + if (dev->rx_outstanding >= dev->rx_fifo_depth) break; dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); @@ -708,8 +690,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) } /* - * Prepare controller for a transaction and start transfer by calling - * i2c_dw_xfer_init() + * Prepare controller for a transaction and call i2c_dw_xfer_msg */ static int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) @@ -752,13 +733,23 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) goto done; } + /* + * We must disable the adapter before returning and signaling the end + * of the current transfer. Otherwise the hardware might continue + * generating interrupts which in turn causes a race condition with + * the following transfer. Needs some more investigation if the + * additional interrupts are a hardware bug or this driver doesn't + * handle them correctly yet. + */ + __i2c_dw_enable(dev, false); + if (dev->msg_err) { ret = dev->msg_err; goto done; } /* no error */ - if (likely(!dev->cmd_err)) { + if (likely(!dev->cmd_err && !dev->status)) { ret = num; goto done; } @@ -768,6 +759,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) ret = i2c_dw_handle_tx_abort(dev); goto done; } + + if (dev->status) + dev_err(dev->dev, + "transfer terminated early - interrupt latency too high?\n"); + ret = -EIO; done: @@ -888,19 +884,9 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) */ tx_aborted: - if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) - || dev->msg_err) { - /* - * We must disable interruts before returning and signaling - * the end of the current transfer. Otherwise the hardware - * might continue generating interrupts for non-existent - * transfers. 
- */ - i2c_dw_disable_int(dev); - dw_readl(dev, DW_IC_CLR_INTR); - + if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) complete(&dev->cmd_complete); - } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { + else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { /* workaround to trigger pending interrupt */ stat = dw_readl(dev, DW_IC_INTR_MASK); i2c_dw_disable_int(dev); diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 49f2084f7bb5..50813a24c541 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev) ret = i2c_add_adapter(&i2c->adap); if (ret < 0) { - clk_unprepare(i2c->clk); + clk_disable_unprepare(i2c->clk); return ret; } diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index 419b54bfc7c7..5e63b17f935d 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c @@ -381,9 +381,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, if (result) return result; - data[i] = octeon_i2c_data_read(i2c, &result); - if (result) - return result; + data[i] = octeon_i2c_data_read(i2c); if (recv_len && i == 0) { if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) return -EPROTO; diff --git a/drivers/i2c/busses/i2c-octeon-core.h b/drivers/i2c/busses/i2c-octeon-core.h index 1db7c835a454..87151ea74acd 100644 --- a/drivers/i2c/busses/i2c-octeon-core.h +++ b/drivers/i2c/busses/i2c-octeon-core.h @@ -5,7 +5,6 @@ #include #include #include -#include #include #include @@ -145,9 +144,9 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 u64 tmp; __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI(i2c)); - - readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, tmp & SW_TWSI_V, - I2C_OCTEON_EVENT_WAIT, i2c->adap.timeout); + do { + tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); + } while ((tmp & SW_TWSI_V) != 0); } #define octeon_i2c_ctl_write(i2c, val) \ @@ -164,28 +163,24 @@ static inline void octeon_i2c_reg_write(struct octeon_i2c *i2c, u64 eop_reg, u8 * * The I2C core registers are accessed indirectly via the SW_TWSI CSR. 
*/ -static inline int octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg, - int *error) +static inline u8 octeon_i2c_reg_read(struct octeon_i2c *i2c, u64 eop_reg) { u64 tmp; - int ret; __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI(i2c)); + do { + tmp = __raw_readq(i2c->twsi_base + SW_TWSI(i2c)); + } while ((tmp & SW_TWSI_V) != 0); - ret = readq_poll_timeout(i2c->twsi_base + SW_TWSI(i2c), tmp, - tmp & SW_TWSI_V, I2C_OCTEON_EVENT_WAIT, - i2c->adap.timeout); - if (error) - *error = ret; return tmp & 0xFF; } #define octeon_i2c_ctl_read(i2c) \ - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL, NULL) -#define octeon_i2c_data_read(i2c, error) \ - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA, error) + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_CTL) +#define octeon_i2c_data_read(i2c) \ + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_DATA) #define octeon_i2c_stat_read(i2c) \ - octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT, NULL) + octeon_i2c_reg_read(i2c, SW_TWSI_EOP_TWSI_STAT) /** * octeon_i2c_read_int - read the TWSI_INT register diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index e280c8ecc0b5..96de9ce5669b 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig @@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL config I2C_MUX_REG tristate "Register-based I2C multiplexer" + depends on HAS_IOMEM help If you say yes to this option, support will be included for a register based I2C multiplexer. This driver provides access to diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index b3893f6282ba..3e6fe1760d82 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne goto err_with_revert; } - p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); + /* + * Check if there are pinctrl states at all. Note: we cant' use + * devm_pinctrl_get_select() because we need to distinguish between + * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state(). + */ + p = devm_pinctrl_get(adap->dev.parent); if (IS_ERR(p)) { ret = PTR_ERR(p); - goto err_with_put; + /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */ + if (ret != -ENODEV) + goto err_with_put; + } else { + /* there are states. 
check and use them */ + struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name); + + if (IS_ERR(s)) { + ret = PTR_ERR(s); + goto err_with_put; + } + ret = pinctrl_select_state(p, s); + if (ret < 0) + goto err_with_put; } priv->chan[new_chan].parent_adap = adap; diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 1091346f2480..8bc3d36d2837 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client, /* discard unconfigured channels */ break; idle_disconnect_pd = pdata->modes[num].deselect_on_exit; - data->deselect |= (idle_disconnect_pd - || idle_disconnect_dt) << num; } + data->deselect |= (idle_disconnect_pd || + idle_disconnect_dt) << num; ret = i2c_mux_add_adapter(muxc, force, num, class); diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index b136d3acc5bd..0f58f46dbad7 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel); struct resolve_cb_context { struct rdma_dev_addr *addr; struct completion comp; + int status; }; static void resolve_cb(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context) { - memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct - rdma_dev_addr)); + if (!status) + memcpy(((struct resolve_cb_context *)context)->addr, + addr, sizeof(struct rdma_dev_addr)); + ((struct resolve_cb_context *)context)->status = status; complete(&((struct resolve_cb_context *)context)->comp); } @@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, wait_for_completion(&ctx.comp); + ret = ctx.status; + if (ret) + return ret; + memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); if (!dev) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c99525512b34..71c7c4c328ef 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -80,6 +80,8 @@ static struct ib_cm { __be32 random_id_operand; struct list_head timewait_list; struct workqueue_struct *wq; + /* Sync on cm change port state */ + spinlock_t state_lock; } cm; /* Counter indexes ordered by attribute ID */ @@ -161,6 +163,8 @@ struct cm_port { struct ib_mad_agent *mad_agent; struct kobject port_obj; u8 port_num; + struct list_head cm_priv_prim_list; + struct list_head cm_priv_altr_list; struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; }; @@ -241,6 +245,12 @@ struct cm_id_private { u8 service_timeout; u8 target_ack_delay; + struct list_head prim_list; + struct list_head altr_list; + /* Indicates that the send port mad is registered and av is set */ + int prim_send_port_not_ready; + int altr_send_port_not_ready; + struct list_head work_list; atomic_t work_count; }; @@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, struct ib_mad_agent *mad_agent; struct ib_mad_send_buf *m; struct ib_ah *ah; + struct cm_av *av; + unsigned long flags, flags2; + int ret = 0; + /* don't let the port to be released till the agent is down */ + spin_lock_irqsave(&cm.state_lock, flags2); + spin_lock_irqsave(&cm.lock, flags); + if (!cm_id_priv->prim_send_port_not_ready) + av = &cm_id_priv->av; + else if (!cm_id_priv->altr_send_port_not_ready && + (cm_id_priv->alt_av.port)) + av = &cm_id_priv->alt_av; + else { + pr_info("%s: not valid CM id\n", __func__); + ret = -ENODEV; + 
spin_unlock_irqrestore(&cm.lock, flags); + goto out; + } + spin_unlock_irqrestore(&cm.lock, flags); + /* Make sure the port haven't released the mad yet */ mad_agent = cm_id_priv->av.port->mad_agent; - ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); - if (IS_ERR(ah)) - return PTR_ERR(ah); + if (!mad_agent) { + pr_info("%s: not a valid MAD agent\n", __func__); + ret = -ENODEV; + goto out; + } + ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); + if (IS_ERR(ah)) { + ret = PTR_ERR(ah); + goto out; + } m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, - cm_id_priv->av.pkey_index, + av->pkey_index, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(m)) { ib_destroy_ah(ah); - return PTR_ERR(m); + ret = PTR_ERR(m); + goto out; } /* Timeout set by caller if response is expected. */ @@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, atomic_inc(&cm_id_priv->refcount); m->context[0] = cm_id_priv; *msg = m; - return 0; + +out: + spin_unlock_irqrestore(&cm.state_lock, flags2); + return ret; } static int cm_alloc_response_msg(struct cm_port *port, @@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, grh, &av->ah_attr); } -static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) +static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, + struct cm_id_private *cm_id_priv) { struct cm_device *cm_dev; struct cm_port *port = NULL; @@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) &av->ah_attr); av->timeout = path->packet_life_time + 1; - return 0; + spin_lock_irqsave(&cm.lock, flags); + if (&cm_id_priv->av == av) + list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); + else if (&cm_id_priv->alt_av == av) + list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); + else + ret = -EINVAL; + + spin_unlock_irqrestore(&cm.lock, flags); + + return ret; } static int cm_alloc_id(struct cm_id_private *cm_id_priv) @@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, spin_lock_init(&cm_id_priv->lock); init_completion(&cm_id_priv->comp); INIT_LIST_HEAD(&cm_id_priv->work_list); + INIT_LIST_HEAD(&cm_id_priv->prim_list); + INIT_LIST_HEAD(&cm_id_priv->altr_list); atomic_set(&cm_id_priv->work_count, -1); atomic_set(&cm_id_priv->refcount, 1); return &cm_id_priv->id; @@ -892,6 +945,15 @@ retest: break; } + spin_lock_irq(&cm.lock); + if (!list_empty(&cm_id_priv->altr_list) && + (!cm_id_priv->altr_send_port_not_ready)) + list_del(&cm_id_priv->altr_list); + if (!list_empty(&cm_id_priv->prim_list) && + (!cm_id_priv->prim_send_port_not_ready)) + list_del(&cm_id_priv->prim_list); + spin_unlock_irq(&cm.lock); + cm_free_id(cm_id->local_id); cm_deref_id(cm_id_priv); wait_for_completion(&cm_id_priv->comp); @@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, goto out; } - ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); + ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, + cm_id_priv); if (ret) goto error1; if (param->alternate_path) { ret = cm_init_av_by_path(param->alternate_path, - &cm_id_priv->alt_av); + &cm_id_priv->alt_av, cm_id_priv); if (ret) goto error1; } @@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work) dev_put(gid_attr.ndev); } work->path[0].gid_type = gid_attr.gid_type; - ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); + ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, + 
cm_id_priv); } if (ret) { int err = ib_get_cached_gid(work->port->cm_dev->ib_device, @@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work) goto rejected; } if (req_msg->alt_local_lid) { - ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); + ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, + cm_id_priv); if (ret) { ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, &work->path[0].sgid, @@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, goto out; } - ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); + ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, + cm_id_priv); if (ret) goto out; cm_id_priv->alt_av.timeout = @@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work) cm_init_av_for_response(work->port, work->mad_recv_wc->wc, work->mad_recv_wc->recv_buf.grh, &cm_id_priv->av); - cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); + cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, + cm_id_priv); ret = atomic_inc_and_test(&cm_id_priv->work_count); if (!ret) list_add_tail(&work->list, &cm_id_priv->work_list); @@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, return -EINVAL; cm_id_priv = container_of(cm_id, struct cm_id_private, id); - ret = cm_init_av_by_path(param->path, &cm_id_priv->av); + ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); if (ret) goto out; @@ -3468,7 +3535,9 @@ out: static int cm_migrate(struct ib_cm_id *cm_id) { struct cm_id_private *cm_id_priv; + struct cm_av tmp_av; unsigned long flags; + int tmp_send_port_not_ready; int ret = 0; cm_id_priv = container_of(cm_id, struct cm_id_private, id); @@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) (cm_id->lap_state == IB_CM_LAP_UNINIT || cm_id->lap_state == IB_CM_LAP_IDLE)) { cm_id->lap_state = IB_CM_LAP_IDLE; + /* Swap address vector */ + tmp_av = cm_id_priv->av; cm_id_priv->av = cm_id_priv->alt_av; + cm_id_priv->alt_av = tmp_av; + /* Swap port send ready state */ + tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; + cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; + cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; } else ret = -EINVAL; spin_unlock_irqrestore(&cm_id_priv->lock, flags); @@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device) port->cm_dev = cm_dev; port->port_num = i; + INIT_LIST_HEAD(&port->cm_priv_prim_list); + INIT_LIST_HEAD(&port->cm_priv_altr_list); + ret = cm_create_port_fs(port); if (ret) goto error1; @@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) { struct cm_device *cm_dev = client_data; struct cm_port *port; + struct cm_id_private *cm_id_priv; + struct ib_mad_agent *cur_mad_agent; struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_CM_SUP }; @@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) port = cm_dev->port[i-1]; ib_modify_port(ib_device, port->port_num, 0, &port_modify); + /* Mark all the cm_id's as not valid */ + spin_lock_irq(&cm.lock); + list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) + cm_id_priv->altr_send_port_not_ready = 1; + list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) + cm_id_priv->prim_send_port_not_ready = 1; + spin_unlock_irq(&cm.lock); /* * We flush the queue here after the going_down set, this * verify that no new works will be queued in the recv handler, * after that we can call the 
unregister_mad_agent */ flush_workqueue(cm.wq); - ib_unregister_mad_agent(port->mad_agent); + spin_lock_irq(&cm.state_lock); + cur_mad_agent = port->mad_agent; + port->mad_agent = NULL; + spin_unlock_irq(&cm.state_lock); + ib_unregister_mad_agent(cur_mad_agent); cm_remove_port_fs(port); } + device_unregister(cm_dev->device); kfree(cm_dev); } @@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void) INIT_LIST_HEAD(&cm.device_list); rwlock_init(&cm.device_lock); spin_lock_init(&cm.lock); + spin_lock_init(&cm.state_lock); cm.listen_service_table = RB_ROOT; cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); cm.remote_id_table = RB_ROOT; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 89a6b0546804..2a6fc47a1dfb 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2438,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) return 0; } +static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, + unsigned long supported_gids, + enum ib_gid_type default_gid) +{ + if ((network_type == RDMA_NETWORK_IPV4 || + network_type == RDMA_NETWORK_IPV6) && + test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) + return IB_GID_TYPE_ROCE_UDP_ENCAP; + + return default_gid; +} + static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) { struct rdma_route *route = &id_priv->id.route; @@ -2463,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) route->num_paths = 1; if (addr->dev_addr.bound_dev_if) { + unsigned long supported_gids; + ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); if (!ndev) { ret = -ENODEV; @@ -2486,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) route->path_rec->net = &init_net; route->path_rec->ifindex = ndev->ifindex; - route->path_rec->gid_type = id_priv->gid_type; + supported_gids = roce_gid_type_mask_support(id_priv->id.device, + id_priv->id.port_num); + route->path_rec->gid_type = + cma_route_gid_type(addr->dev_addr.network, + supported_gids, + id_priv->gid_type); } if (!ndev) { ret = -ENODEV; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 224ad274ea0b..84b4eff90395 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, cur_base = addr & PAGE_MASK; - if (npages == 0) { + if (npages == 0 || npages > UINT_MAX) { ret = -EINVAL; goto out; } diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 0012fa58c105..44b1104eb168 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, container_of(uobj, struct ib_uqp_object, uevent.uobject); idr_remove_uobj(&ib_uverbs_qp_idr, uobj); - if (qp != qp->real_qp) { - ib_close_qp(qp); - } else { + if (qp == qp->real_qp) ib_uverbs_detach_umcast(qp, uqp); - ib_destroy_qp(qp); - } + ib_destroy_qp(qp); ib_uverbs_release_uevent(file, &uqp->uevent); kfree(uqp); } diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 867b8cf82be8..19c6477af19f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -666,18 +666,6 @@ skip_cqe: return ret; } -static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey) -{ - struct c4iw_mr *mhp; - unsigned long flags; - - spin_lock_irqsave(&rhp->lock, 
flags); - mhp = get_mhp(rhp, rkey >> 8); - if (mhp) - mhp->attr.state = 0; - spin_unlock_irqrestore(&rhp->lock, flags); -} - /* * Get one cq entry from c4iw and map it to openib. * @@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); wc->wc_flags |= IB_WC_WITH_INVALIDATE; - invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); + c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); } } else { switch (CQE_OPCODE(&cqe)) { @@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) /* Invalidate the MR if the fastreg failed */ if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) - invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); + c4iw_invalidate_mr(qhp->rhp, + CQE_WRID_FR_STAG(&cqe)); break; default: printk(KERN_ERR MOD "Unexpected opcode %d " diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7e7f79e55006..4788e1a46fde 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -999,6 +999,6 @@ extern int db_coalescing_threshold; extern int use_dsgl; void c4iw_drain_rq(struct ib_qp *qp); void c4iw_drain_sq(struct ib_qp *qp); - +void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); #endif diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 80e27749420a..410408f886c1 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr) kfree(mhp); return 0; } + +void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) +{ + struct c4iw_mr *mhp; + unsigned long flags; + + spin_lock_irqsave(&rhp->lock, flags); + mhp = get_mhp(rhp, rkey >> 8); + if (mhp) + mhp->attr.state = 0; + spin_unlock_irqrestore(&rhp->lock, flags); +} diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index f57deba6717c..b7ac97b27c88 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, return 0; } -static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, - struct ib_send_wr *wr, u8 *len16) +static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { - struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8); - - mhp->attr.state = 0; wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); wqe->inv.r2 = 0; *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); @@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); + *bad_wr = wr; return -EINVAL; } num_wrs = t4_sq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); + *bad_wr = wr; return -ENOMEM; } while (wr) { @@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, case IB_WR_RDMA_READ_WITH_INV: fw_opcode = FW_RI_RDMA_READ_WR; swsqe->opcode = FW_RI_READ_REQ; - if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) + if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { + c4iw_invalidate_mr(qhp->rhp, + wr->sg_list[0].lkey); fw_flags = FW_RI_RDMA_READ_INVALIDATE; - else + } else { fw_flags = 0; + } err = build_rdma_read(wqe, wr, &len16); if (err) break; @@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, fw_flags |= FW_RI_LOCAL_FENCE_FLAG; fw_opcode = FW_RI_INV_LSTAG_WR; 
swsqe->opcode = FW_RI_LOCAL_INV; - err = build_inv_stag(qhp->rhp, wqe, wr, &len16); + err = build_inv_stag(wqe, wr, &len16); + c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); break; default: PDBG("%s post of type=%d TBD!\n", __func__, @@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_lock_irqsave(&qhp->lock, flag); if (t4_wq_in_error(&qhp->wq)) { spin_unlock_irqrestore(&qhp->lock, flag); + *bad_wr = wr; return -EINVAL; } num_wrs = t4_rq_avail(&qhp->wq); if (num_wrs == 0) { spin_unlock_irqrestore(&qhp->lock, flag); + *bad_wr = wr; return -ENOMEM; } while (wr) { diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a26a9a0bfc41..67ea85a56945 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu) } mutex_unlock(&affinity->lock); } - -int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, - size_t count) -{ - struct hfi1_affinity_node *entry; - cpumask_var_t mask; - int ret, i; - - mutex_lock(&node_affinity.lock); - entry = node_affinity_lookup(dd->node); - - if (!entry) { - ret = -EINVAL; - goto unlock; - } - - ret = zalloc_cpumask_var(&mask, GFP_KERNEL); - if (!ret) { - ret = -ENOMEM; - goto unlock; - } - - ret = cpulist_parse(buf, mask); - if (ret) - goto out; - - if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { - dd_dev_warn(dd, "Invalid CPU mask\n"); - ret = -EINVAL; - goto out; - } - - /* reset the SDMA interrupt affinity details */ - init_cpu_mask_set(&entry->def_intr); - cpumask_copy(&entry->def_intr.mask, mask); - - /* Reassign the affinity for each SDMA interrupt. */ - for (i = 0; i < dd->num_msix_entries; i++) { - struct hfi1_msix_entry *msix; - - msix = &dd->msix_entries[i]; - if (msix->type != IRQ_SDMA) - continue; - - ret = get_irq_affinity(dd, msix); - - if (ret) - break; - } -out: - free_cpumask_var(mask); -unlock: - mutex_unlock(&node_affinity.lock); - return ret ? ret : strnlen(buf, PAGE_SIZE); -} - -int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf) -{ - struct hfi1_affinity_node *entry; - - mutex_lock(&node_affinity.lock); - entry = node_affinity_lookup(dd->node); - - if (!entry) { - mutex_unlock(&node_affinity.lock); - return -EINVAL; - } - - cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask); - mutex_unlock(&node_affinity.lock); - return strnlen(buf, PAGE_SIZE); -} diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index b89ea3c0ee1a..42e63316afd1 100644 --- a/drivers/infiniband/hw/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h @@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int); /* Release a CPU used by a user process. */ void hfi1_put_proc_affinity(int); -int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf); -int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, - size_t count); - struct hfi1_affinity_node { int node; struct cpu_mask_set def_intr; diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9bf5f23544d4..24d0820873cf 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) /* leave shared count at zero for both global and VL15 */ write_global_credit(dd, vau, vl15buf, 0); - /* We may need some credits for another VL when sending packets - * with the snoop interface. 
Dividing it down the middle for VL15 - * and VL0 should suffice. - */ - if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) { - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1) - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); - write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1) - << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT); - } else { - write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf - << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); - } + write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf + << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); } /* @@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd) u32 mask = ~((1U << ppd->lmc) - 1); u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); - if (dd->hfi1_snoop.mode_flag) - dd_dev_info(dd, "Set lid/lmc while snooping"); - c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) @@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque) mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); } -#define C_MAX_NAME 13 /* 12 chars + one for /0 */ +#define C_MAX_NAME 16 /* 15 chars + one for /0 */ static int init_cntrs(struct hfi1_devdata *dd) { int i, rcv_ctxts, j; @@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, * Any error printing is already done by the init code. * On return, we have the chip mapped. */ - ret = hfi1_pcie_ddinit(dd, pdev, ent); + ret = hfi1_pcie_ddinit(dd, pdev); if (ret < 0) goto bail_free; @@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, if (ret) goto bail_free_cntrs; + init_completion(&dd->user_comp); + + /* The user refcount starts with one to inidicate an active device */ + atomic_set(&dd->user_refcount, 1); + goto bail; bail_free_rcverr: diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 92345259a8f4..043fd21dc5f3 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -320,6 +320,9 @@ /* DC_DC8051_CFG_MODE.GENERAL bits */ #define DISABLE_SELF_GUID_CHECK 0x2 +/* Bad L2 frame error code */ +#define BAD_L2_ERR 0x6 + /* * Eager buffer minimum and maximum sizes supported by the hardware. * All power-of-two sizes in between are supported as well. 
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 6563e4d38b80..c5efff29c147 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet) dd->rhf_offset; struct rvt_qp *qp; struct ib_header *hdr; - struct ib_other_headers *ohdr; struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; u64 rhf = rhf_to_cpu(rhf_addr); u32 etype = rhf_rcv_type(rhf), qpn, bth1; @@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet) if (etype != RHF_RCV_TYPE_IB) goto next; - hdr = hfi1_get_msgheader(dd, rhf_addr); + packet->hdr = hfi1_get_msgheader(dd, rhf_addr); + hdr = packet->hdr; lnh = be16_to_cpu(hdr->lrh[0]) & 3; - if (lnh == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; - else if (lnh == HFI1_LRH_GRH) - ohdr = &hdr->u.l.oth; - else + if (lnh == HFI1_LRH_BTH) { + packet->ohdr = &hdr->u.oth; + } else if (lnh == HFI1_LRH_GRH) { + packet->ohdr = &hdr->u.l.oth; + packet->rcv_flags |= HFI1_HAS_GRH; + } else { goto next; /* just in case */ + } - bth1 = be32_to_cpu(ohdr->bth[1]); + bth1 = be32_to_cpu(packet->ohdr->bth[1]); is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); if (!is_ecn) @@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) /* turn off BECN, FECN */ bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); - ohdr->bth[1] = cpu_to_be32(bth1); + packet->ohdr->bth[1] = cpu_to_be32(bth1); next: update_ps_mdata(&mdata, rcd); } @@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet) int process_receive_bypass(struct hfi1_packet *packet) { + struct hfi1_devdata *dd = packet->rcd->dd; + if (unlikely(rhf_err_flags(packet->rhf))) handle_eflags(packet); - dd_dev_err(packet->rcd->dd, + dd_dev_err(dd, "Bypass packets are not supported in normal operation. Dropping\n"); - incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); + incr_cntr64(&dd->sw_rcv_bypass_packet_errors); + if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { + u64 *flits = packet->ebuf; + + if (flits && !(packet->rhf & RHF_LEN_ERR)) { + dd->err_info_rcvport.packet_flit1 = flits[0]; + dd->err_info_rcvport.packet_flit2 = + packet->tlen > sizeof(flits[0]) ? flits[1] : 0; + } + dd->err_info_rcvport.status_and_code |= + (OPA_EI_STATUS_SMASK | BAD_L2_ERR); + } return RHF_RCV_CONTINUE; } diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 677efa0e8cd6..bd786b7bd30b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) struct hfi1_devdata, user_cdev); + if (!atomic_inc_not_zero(&dd->user_refcount)) + return -ENXIO; + /* Just take a ref now. Not all opens result in a context assign */ kobject_get(&dd->kobj); @@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) fd->rec_cpu_num = -1; /* no cpu affinity by default */ fd->mm = current->mm; atomic_inc(&fd->mm->mm_count); + fp->private_data = fd; + } else { + fp->private_data = NULL; + + if (atomic_dec_and_test(&dd->user_refcount)) + complete(&dd->user_comp); + + return -ENOMEM; } - fp->private_data = fd; - - return fd ? 
0 : -ENOMEM; + return 0; } static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, @@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) done: mmdrop(fdata->mm); kobject_put(&dd->kobj); + + if (atomic_dec_and_test(&dd->user_refcount)) + complete(&dd->user_comp); + kfree(fdata); return 0; } diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 7eef11b316ff..cc87fd4e534b 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -367,26 +367,6 @@ struct hfi1_packet { u8 etype; }; -/* - * Private data for snoop/capture support. - */ -struct hfi1_snoop_data { - int mode_flag; - struct cdev cdev; - struct device *class_dev; - /* protect snoop data */ - spinlock_t snoop_lock; - struct list_head queue; - wait_queue_head_t waitq; - void *filter_value; - int (*filter_callback)(void *hdr, void *data, void *value); - u64 dcc_cfg; /* saved value of DCC Cfg register */ -}; - -/* snoop mode_flag values */ -#define HFI1_PORT_SNOOP_MODE 1U -#define HFI1_PORT_CAPTURE_MODE 2U - struct rvt_sge_state; /* @@ -613,8 +593,6 @@ struct hfi1_pportdata { struct mutex hls_lock; u32 host_link_state; - spinlock_t sdma_alllock ____cacheline_aligned_in_smp; - u32 lstate; /* logical link state */ /* these are the "32 bit" regs */ @@ -1104,8 +1082,6 @@ struct hfi1_devdata { char *portcntrnames; size_t portcntrnameslen; - struct hfi1_snoop_data hfi1_snoop; - struct err_info_rcvport err_info_rcvport; struct err_info_constraint err_info_rcv_constraint; struct err_info_constraint err_info_xmit_constraint; @@ -1141,8 +1117,8 @@ struct hfi1_devdata { rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; /* - * Handlers for outgoing data so that snoop/capture does not - * have to have its hooks in the send path + * Capability to have different send engines simply by changing a + * pointer value. 
*/ send_routine process_pio_send; send_routine process_dma_send; @@ -1174,6 +1150,10 @@ struct hfi1_devdata { spinlock_t aspm_lock; /* Number of verbs contexts which have disabled ASPM */ atomic_t aspm_disabled_cnt; + /* Keeps track of user space clients */ + atomic_t user_refcount; + /* Used to wait for outstanding user space clients before dev removal */ + struct completion user_comp; struct hfi1_affinity *affinity; struct rhashtable sdma_rht; @@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit); extern u32 hfi1_cpulist_count; extern unsigned long *hfi1_cpulist; -extern unsigned int snoop_drop_send; -extern unsigned int snoop_force_capture; int hfi1_init(struct hfi1_devdata *, int); int hfi1_count_units(int *npresentp, int *nupp); int hfi1_count_active_units(void); @@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); void reset_link_credits(struct hfi1_devdata *dd); void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); -int snoop_recv_handler(struct hfi1_packet *packet); -int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, - u64 pbc); -int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, - u64 pbc); -void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, - u64 pbc, const void *from, size_t count); int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) @@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); void hfi1_pcie_cleanup(struct pci_dev *); -int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, - const struct pci_device_id *); +int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); void hfi1_pcie_ddcleanup(struct hfi1_devdata *); void hfi1_pcie_flr(struct hfi1_devdata *); int pcie_speeds(struct hfi1_devdata *); @@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet); int kdeth_process_eager(struct hfi1_packet *packet); int process_receive_invalid(struct hfi1_packet *packet); -extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; - void update_sge(struct rvt_sge_state *ss, u32 length); /* global module parameter variables */ @@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex; #define DRIVER_NAME "hfi1" #define HFI1_USER_MINOR_BASE 0 #define HFI1_TRACE_MINOR 127 -#define HFI1_DIAGPKT_MINOR 128 -#define HFI1_DIAG_MINOR_BASE 129 -#define HFI1_SNOOP_CAPTURE_BASE 200 #define HFI1_NMINORS 255 #define PCI_VENDOR_ID_INTEL 0x8086 @@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex; static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, u16 ctxt_type) { - u64 base_sc_integrity = + u64 base_sc_integrity; + + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) + return 0; + + base_sc_integrity = SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK @@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK - | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; @@ -1872,18 +1842,23 @@ static inline u64 
hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, else base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; - if (is_ax(dd)) - /* turn off send-side job key checks - A0 */ - return base_sc_integrity & - ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; + /* turn on send-side job key checks if !A0 */ + if (!is_ax(dd)) + base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; + return base_sc_integrity; } static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) { - u64 base_sdma_integrity = + u64 base_sdma_integrity; + + /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ + if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) + return 0; + + base_sdma_integrity = SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK - | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK @@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK - | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; - if (is_ax(dd)) - /* turn off send-side job key checks - A0 */ - return base_sdma_integrity & - ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; + if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) + base_sdma_integrity |= + SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK; + + /* turn on send-side job key checks if !A0 */ + if (!is_ax(dd)) + base_sdma_integrity |= + SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; + return base_sdma_integrity; } diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 60db61536fed..e3b5bc93bc70 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) struct hfi1_ctxtdata *rcd; ppd = dd->pport + (i % dd->num_pports); + + /* dd->rcd[i] gets assigned inside the callee */ rcd = hfi1_create_ctxtdata(ppd, i, dd->node); if (!rcd) { dd_dev_err(dd, @@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) if (!rcd->sc) { dd_dev_err(dd, "Unable to allocate kernel send context, failing\n"); - dd->rcd[rcd->ctxt] = NULL; - hfi1_free_ctxtdata(dd, rcd); goto nomem; } @@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) if (ret < 0) { dd_dev_err(dd, "Failed to setup kernel receive context, failing\n"); - sc_free(rcd->sc); - dd->rcd[rcd->ctxt] = NULL; - hfi1_free_ctxtdata(dd, rcd); ret = -EFAULT; goto bail; } @@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) nomem: ret = -ENOMEM; bail: + if (dd->rcd) { + for (i = 0; i < dd->num_rcv_contexts; ++i) + hfi1_free_ctxtdata(dd, dd->rcd[i]); + } kfree(dd->rcd); dd->rcd = NULL; return ret; @@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, dd->num_rcv_contexts - dd->first_user_ctxt) kctxt_ngroups = (dd->rcv_entries.nctxt_extra - (dd->num_rcv_contexts - dd->first_user_ctxt)); - rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); + rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); if (rcd) { u32 rcvtids, max_entries; @@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, } rcd->eager_base = base * dd->rcv_entries.group_size; - /* Validate and initialize Rcv Hdr Q variables */ - if 
(rcvhdrcnt % HDRQ_INCREMENT) { - dd_dev_err(dd, - "ctxt%u: header queue count %d must be divisible by %lu\n", - rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); - goto bail; - } rcd->rcvhdrq_cnt = rcvhdrcnt; rcd->rcvhdrqentsize = hfi1_hdrq_entsize; /* @@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); mutex_init(&ppd->hls_lock); - spin_lock_init(&ppd->sdma_alllock); spin_lock_init(&ppd->qsfp_info.qsfp_lock); ppd->qsfp_info.ppd = ppd; @@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd) hfi1_free_devdata(dd); } +static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) +{ + if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { + hfi1_early_err(dev, "Receive header queue count too small\n"); + return -EINVAL; + } + + if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { + hfi1_early_err(dev, + "Receive header queue count cannot be greater than %u\n", + HFI1_MAX_HDRQ_EGRBUF_CNT); + return -EINVAL; + } + + if (thecnt % HDRQ_INCREMENT) { + hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", + thecnt, HDRQ_INCREMENT); + return -EINVAL; + } + + return 0; +} + static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret = 0, j, pidx, initfail; - struct hfi1_devdata *dd = ERR_PTR(-EINVAL); + struct hfi1_devdata *dd; struct hfi1_pportdata *ppd; /* First, lock the non-writable module parameters */ HFI1_CAP_LOCK(); /* Validate some global module parameters */ - if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { - hfi1_early_err(&pdev->dev, "Header queue count too small\n"); - ret = -EINVAL; + ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); + if (ret) goto bail; - } - if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { - hfi1_early_err(&pdev->dev, - "Receive header queue count cannot be greater than %u\n", - HFI1_MAX_HDRQ_EGRBUF_CNT); - ret = -EINVAL; - goto bail; - } + /* use the encoding function as a sanitization check */ if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", @@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto bail; - /* - * Do device-specific initialization, function table setup, dd - * allocation, etc. - */ - switch (ent->device) { - case PCI_DEVICE_ID_INTEL0: - case PCI_DEVICE_ID_INTEL1: - dd = hfi1_init_dd(pdev, ent); - break; - default: + if (!(ent->device == PCI_DEVICE_ID_INTEL0 || + ent->device == PCI_DEVICE_ID_INTEL1)) { hfi1_early_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n", ent->device); ret = -ENODEV; + goto clean_bail; } - if (IS_ERR(dd)) + /* + * Do device-specific initialization, function table setup, dd + * allocation, etc. + */ + dd = hfi1_init_dd(pdev, ent); + + if (IS_ERR(dd)) { ret = PTR_ERR(dd); - if (ret) goto clean_bail; /* error already printed */ + } ret = create_workqueues(dd); if (ret) @@ -1538,12 +1545,31 @@ bail: return ret; } +static void wait_for_clients(struct hfi1_devdata *dd) +{ + /* + * Remove the device init value and complete the device if there is + * no clients or wait for active clients to finish. 
+ */ + if (atomic_dec_and_test(&dd->user_refcount)) + complete(&dd->user_comp); + + wait_for_completion(&dd->user_comp); +} + static void remove_one(struct pci_dev *pdev) { struct hfi1_devdata *dd = pci_get_drvdata(pdev); /* close debugfs files before ib unregister */ hfi1_dbg_ibdev_exit(&dd->verbs_dev); + + /* remove the /dev hfi1 interface */ + hfi1_device_remove(dd); + + /* wait for existing user space clients to finish */ + wait_for_clients(dd); + /* unregister from IB core */ hfi1_unregister_ib_device(dd); @@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev) /* wait until all of our (qsfp) queue_work() calls complete */ flush_workqueue(ib_wq); - hfi1_device_remove(dd); - postinit_cleanup(dd); } diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 89c68da1c273..4ac8f330c5cb 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev) * fields required to re-initialize after a chip reset, or for * various other purposes */ -int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, - const struct pci_device_id *ent) +int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) { unsigned long len; resource_size_t addr; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 50a3a36d9363..d89b8745d4c1 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) void set_pio_integrity(struct send_context *sc) { struct hfi1_devdata *dd = sc->dd; - u64 reg = 0; u32 hw_context = sc->hw_context; int type = sc->type; - /* - * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if - * we're snooping. 
- */ - if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && - dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE) - reg = hfi1_pkt_default_send_ctxt_mask(dd, type); - - write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg); + write_kctxt_csr(dd, hw_context, + SC(CHECK_ENABLE), + hfi1_pkt_default_send_ctxt_mask(dd, type)); } static u32 get_buffers_allocated(struct send_context *sc) diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 8bc5013f39a1..83198a8a8797 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) lockdep_assert_held(&qp->s_lock); qp->s_flags |= RVT_S_WAIT_RNR; - qp->s_timer.expires = jiffies + usecs_to_jiffies(to); + priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to); add_timer(&priv->s_rnr_timer); } diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index fd39bcaa062d..9cbe52d21077 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde) write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); } -#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ -(r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) - -#define SET_STATIC_RATE_CONTROL_SMASK(r) \ -(r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) /* * set_sdma_integrity * @@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde) static void set_sdma_integrity(struct sdma_engine *sde) { struct hfi1_devdata *dd = sde->dd; - u64 reg; - if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY))) - return; - - reg = hfi1_pkt_base_sdma_integrity(dd); - - if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) - CLEAR_STATIC_RATE_CONTROL_SMASK(reg); - else - SET_STATIC_RATE_CONTROL_SMASK(reg); - - write_sde_csr(sde, SD(CHECK_ENABLE), reg); + write_sde_csr(sde, SD(CHECK_ENABLE), + hfi1_pkt_base_sdma_integrity(dd)); } static void init_sdma_regs( diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index edba22461a9c..919a5474e651 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -49,7 +49,6 @@ #include "hfi.h" #include "mad.h" #include "trace.h" -#include "affinity.h" /* * Start of per-port congestion control structures and support code @@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device, return ret; } -static ssize_t show_sdma_affinity(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); - struct hfi1_devdata *dd = dd_from_dev(dev); - - return hfi1_get_sdma_affinity(dd, buf); -} - -static ssize_t store_sdma_affinity(struct device *device, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct hfi1_ibdev *dev = - container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); - struct hfi1_devdata *dd = dd_from_dev(dev); - - return hfi1_set_sdma_affinity(dd, buf, count); -} - /* * end of per-unit (or driver, in some cases, but replicated * per unit) functions @@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); -static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity, - store_sdma_affinity); static struct device_attribute 
*hfi1_attributes[] = { &dev_attr_hw_rev, @@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = { &dev_attr_boardversion, &dev_attr_tempsense, &dev_attr_chip_reset, - &dev_attr_sdma_affinity, }; int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 11e02b228922..f77e59fb43fe 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate, ) ); -#define SNOOP_PRN \ - "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \ - "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]" - -TRACE_EVENT(snoop_capture, - TP_PROTO(struct hfi1_devdata *dd, - int hdr_len, - struct ib_header *hdr, - int data_len, - void *data), - TP_ARGS(dd, hdr_len, hdr, data_len, data), - TP_STRUCT__entry( - DD_DEV_ENTRY(dd) - __field(u16, slid) - __field(u16, dlid) - __field(u32, qpn) - __field(u8, opcode) - __field(u8, sl) - __field(u16, pkey) - __field(u32, hdr_len) - __field(u32, data_len) - __field(u8, lnh) - __dynamic_array(u8, raw_hdr, hdr_len) - __dynamic_array(u8, raw_pkt, data_len) - ), - TP_fast_assign( - struct ib_other_headers *ohdr; - - __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); - if (__entry->lnh == HFI1_LRH_BTH) - ohdr = &hdr->u.oth; - else - ohdr = &hdr->u.l.oth; - DD_DEV_ASSIGN(dd); - __entry->slid = be16_to_cpu(hdr->lrh[3]); - __entry->dlid = be16_to_cpu(hdr->lrh[1]); - __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; - __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; - __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; - __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; - __entry->hdr_len = hdr_len; - __entry->data_len = data_len; - memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); - memcpy(__get_dynamic_array(raw_pkt), data, data_len); - ), - TP_printk( - "[%s] " SNOOP_PRN, - __get_str(dev), - __entry->slid, - __entry->dlid, - __entry->qpn, - __entry->opcode, - show_ib_opcode(__entry->opcode), - __entry->sl, - __entry->pkey, - __entry->hdr_len, - __entry->data_len - ) -); - #endif /* __HFI1_TRACE_RX_H */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a761f804111e..77697d690f3e 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req, rb_node = hfi1_mmu_rb_extract(pq->handler, (unsigned long)iovec->iov.iov_base, iovec->iov.iov_len); - if (rb_node && !IS_ERR(rb_node)) + if (rb_node) node = container_of(rb_node, struct sdma_mmu_node, rb); else rb_node = NULL; diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 5fc623362731..b9bf0759f10a 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr if (vlan_tag < 0x1000) vlan_tag |= (ah_attr->sl & 7) << 13; ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); - ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); + ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); + if (ret < 0) + return ERR_PTR(ret); + ah->av.eth.gid_index = ret; ah->av.eth.vlan = cpu_to_be16(vlan_tag); ah->av.eth.hop_limit = ah_attr->grh.hop_limit; if (ah_attr->static_rate) 
{ diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 1ea686b9e0f9..6a0fec357dae 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, if (context) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { err = -EFAULT; - goto err_dbmap; + goto err_cq_free; } return &cq->ibcq; +err_cq_free: + mlx4_cq_free(dev->dev, &cq->mcq); + err_dbmap: if (context) mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 79d017baf6f4..fcd04b881ec1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, if (err) goto err_create; } else { - /* for now choose 64 bytes till we have a proper interface */ - cqe_size = 64; + cqe_size = cache_line_size() == 128 ? 128 : 64; err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, &index, &inlen); if (err) diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 63036c731626..32b09f059c84 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, { struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; struct ib_event ibev; - + bool fatal = false; u8 port = 0; switch (event) { case MLX5_DEV_EVENT_SYS_ERROR: - ibdev->ib_active = false; ibev.event = IB_EVENT_DEVICE_FATAL; mlx5_ib_handle_internal_error(ibdev); + fatal = true; break; case MLX5_DEV_EVENT_PORT_UP: @@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, if (ibdev->ib_active) ib_dispatch_event(&ibev); + + if (fatal) + ibdev->ib_active = false; } static void get_ext_port_caps(struct mlx5_ib_dev *dev) @@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) } err = init_node_data(dev); if (err) - goto err_dealloc; + goto err_free_port; mutex_init(&dev->flow_db.lock); mutex_init(&dev->cap_mask_mutex); @@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) if (ll == IB_LINK_LAYER_ETHERNET) { err = mlx5_enable_roce(dev); if (err) - goto err_dealloc; + goto err_free_port; } err = create_dev_resources(&dev->devr); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dcdcd195fe53..7d689903c87c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -626,6 +626,8 @@ struct mlx5_ib_dev { struct mlx5_ib_resources devr; struct mlx5_mr_cache cache; struct timer_list delay_timer; + /* Prevents soft lock on massive reg MRs */ + struct mutex slow_path_mutex; int fill_delay; #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING struct ib_odp_caps odp_caps; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index d4ad672b905b..4e9012463c37 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) int err; int i; + mutex_init(&dev->slow_path_mutex); cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); if (!cache->wq) { mlx5_ib_warn(dev, "failed to create work queue\n"); @@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, goto error; } - if (!mr) + if (!mr) { + mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, 
virt_addr, length, umem, ncont, page_shift, access_flags); + mutex_unlock(&dev->slow_path_mutex); + } if (IS_ERR(mr)) { err = PTR_ERR(mr); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7ce97daf26c6..d1e921816bfe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, - to_mcq(init_attr->recv_cq)->mcq.cqn, - to_mcq(init_attr->send_cq)->mcq.cqn); + init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, + init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); qp->trans_qp.xrcdn = xrcdn; @@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, udata->inlen)) return ERR_PTR(-EOPNOTSUPP); + if (init_attr->log_ind_tbl_size > + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { + mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", + init_attr->log_ind_tbl_size, + MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); + return ERR_PTR(-EINVAL); + } + min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); if (udata->outlen && udata->outlen < min_resp_len) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c index 01f71caa3ac4..f2cefb0d9180 100644 --- a/drivers/infiniband/sw/rdmavt/dma.c +++ b/drivers/infiniband/sw/rdmavt/dma.c @@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, if (WARN_ON(!valid_dma_direction(direction))) return BAD_DMA_ADDRESS; - if (offset + size > PAGE_SIZE) - return BAD_DMA_ADDRESS; - addr = (u64)page_address(page); if (addr) addr += offset; diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index b8258e4f0aea..ffff5a54cb34 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, { int err; struct socket *sock; - struct udp_port_cfg udp_cfg; - struct udp_tunnel_sock_cfg tnl_cfg; - - memset(&udp_cfg, 0, sizeof(udp_cfg)); + struct udp_port_cfg udp_cfg = {0}; + struct udp_tunnel_sock_cfg tnl_cfg = {0}; if (ipv6) { udp_cfg.family = AF_INET6; @@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, return ERR_PTR(err); } - tnl_cfg.sk_user_data = NULL; tnl_cfg.encap_type = 1; tnl_cfg.encap_rcv = rxe_udp_encap_recv; - tnl_cfg.encap_destroy = NULL; /* Setup UDP tunnel */ setup_udp_tunnel_sock(net, sock, &tnl_cfg); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index b8036cfbce04..c3e60e4bde6e 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) if (qp->sq.queue) { __rxe_do_task(&qp->comp.task); __rxe_do_task(&qp->req.task); + rxe_queue_reset(qp->sq.queue); } /* cleanup attributes */ @@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp) { qp->req.state = QP_STATE_ERROR; qp->resp.state = QP_STATE_ERROR; + qp->attr.qp_state = IB_QPS_ERR; /* drain work and packet queues */ rxe_run_task(&qp->resp.task, 1); diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 08274254eb88..d14bf496d62d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -84,6 
+84,15 @@ err1: return -EINVAL; } +inline void rxe_queue_reset(struct rxe_queue *q) +{ + /* queue is comprised from header and the memory + * of the actual queue. See "struct rxe_queue_buf" in rxe_queue.h + * reset only the queue itself and not the management header + */ + memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); +} + struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, unsigned int elem_size) diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 239fd609c31e..8c8641c87817 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe, size_t buf_size, struct rxe_mmap_info **ip_p); +void rxe_queue_reset(struct rxe_queue *q); + struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, unsigned int elem_size); diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 832846b73ea0..22bd9630dcd9 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -696,7 +696,8 @@ next_wqe: qp->req.wqe_index); wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; - goto complete; + __rxe_do_task(&qp->comp.task); + return 0; } payload = mtu; } @@ -745,13 +746,17 @@ err: wqe->status = IB_WC_LOC_PROT_ERR; wqe->state = wqe_state_error; -complete: - if (qp_type(qp) != IB_QPT_RC) { - while (rxe_completer(qp) == 0) - ; - } - - return 0; + /* + * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS + * ---------8<---------8<------------- + * ...Note that if a completion error occurs, a Work Completion + * will always be generated, even if the signaling + * indicator requests an Unsignaled Completion. + * ---------8<---------8<------------- + */ + wqe->wr.send_flags |= IB_SEND_SIGNALED; + __rxe_do_task(&qp->comp.task); + return -EAGAIN; exit: return -EAGAIN; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index fb4b185dea96..bee267424972 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -1115,10 +1115,6 @@ static int psmouse_extensions(struct psmouse *psmouse, if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2, &max_proto, set_properties, true)) return PSMOUSE_TOUCHKIT_PS2; - - if (psmouse_try_protocol(psmouse, PSMOUSE_BYD, - &max_proto, set_properties, true)) - return PSMOUSE_BYD; } /* diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 58470f5ced04..8c53748a769d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, struct pci_dev *pdev = to_pci_dev(data); struct dmar_pci_notify_info *info; - /* Only care about add/remove events for physical functions */ + /* Only care about add/remove events for physical functions. + * For VFs we actually do the lookup based on the corresponding + * PF in device_to_iommu() anyway. */ if (pdev->is_virtfn) return NOTIFY_DONE; if (action != BUS_NOTIFY_ADD_DEVICE && diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3965e73db51c..d8376c2d18b3 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf return NULL; if (dev_is_pci(dev)) { + struct pci_dev *pf_pdev; + pdev = to_pci_dev(dev); + /* VFs aren't listed in scope tables; we need to look up + * the PF instead to find the IOMMU. 
*/ + pf_pdev = pci_physfn(pdev); + dev = &pf_pdev->dev; segment = pci_domain_nr(pdev->bus); } else if (has_acpi_companion(dev)) dev = &ACPI_COMPANION(dev)->dev; @@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, tmp) { if (tmp == dev) { + /* For a VF use its original BDF# not that of the PF + * which we used for the IOMMU lookup. Strictly speaking + * we could do this for all PCI devices; we only need to + * get the BDF# from the scope table for ACPI matches. */ + if (pdev->is_virtfn) + goto got_pdev; + *bus = drhd->devices[i].bus; *devfn = drhd->devices[i].devfn; goto out; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 8ebb3530afa7..cb72e0011310 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) struct page *pages; int order; - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; - if (order < 0) - order = 0; + /* Start at 2 because it's defined as 2^(1+PSS) */ + iommu->pasid_max = 2 << ecap_pss(iommu->ecap); + /* Eventually I'm promised we will get a multi-level PASID table + * and it won't have to be physically contiguous. Until then, + * limit the size because 8MiB contiguous allocations can be hard + * to come by. The limit of 0x20000, which is 1MiB for each of + * the PASID and PASID-state tables, is somewhat arbitrary. */ + if (iommu->pasid_max > 0x20000) + iommu->pasid_max = 0x20000; + + order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!pages) { pr_warn("IOMMU: %s: Failed to allocate PASID table\n", @@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); if (ecap_dis(iommu->ecap)) { + /* Just making it explicit... 
*/ + BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (pages) iommu->pasid_state_table = page_address(pages); @@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) int intel_svm_free_pasid_tables(struct intel_iommu *iommu) { - int order; - - order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; - if (order < 0) - order = 0; + int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); if (iommu->pasid_table) { free_pages((unsigned long)iommu->pasid_table, order); @@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ } svm->iommu = iommu; - if (pasid_max > 2 << ecap_pss(iommu->ecap)) - pasid_max = 2 << ecap_pss(iommu->ecap); + if (pasid_max > iommu->pasid_max) + pasid_max = iommu->pasid_max; /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ ret = idr_alloc(&iommu->pasid_idr, svm, diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 08c87fadca8c..1f32688c312d 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -65,6 +65,7 @@ #include #include #include +#include #include "mailbox.h" @@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) chan->txdone_method |= TXDONE_BY_ACK; + spin_unlock_irqrestore(&chan->lock, flags); + if (pcc_doorbell_irq[subspace_id] > 0) { int rc; @@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, if (unlikely(rc)) { dev_err(dev, "failed to register PCC interrupt %d\n", pcc_doorbell_irq[subspace_id]); + pcc_mbox_free_channel(chan); chan = ERR_PTR(rc); } } - spin_unlock_irqrestore(&chan->lock, flags); - return chan; } EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); @@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan) return; } + if (pcc_doorbell_irq[id] > 0) + devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); + spin_lock_irqsave(&chan->lock, flags); chan->cl = NULL; chan->active_req = NULL; if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) chan->txdone_method = TXDONE_BY_POLL; - if (pcc_doorbell_irq[id] > 0) - devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); - spin_unlock_irqrestore(&chan->lock, flags); } EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); - /** * pcc_send_data - Called from Mailbox Controller code. Used * here only to ring the channel doorbell. The PCC client diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c index be19afeed7a9..93f59bfea092 100644 --- a/drivers/media/dvb-frontends/gp8psk-fe.c +++ b/drivers/media/dvb-frontends/gp8psk-fe.c @@ -1,5 +1,5 @@ -/* DVB USB compliant Linux driver for the - * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module +/* + * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module * * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) @@ -8,11 +8,9 @@ * * This module is based off the vp7045 and vp702x modules * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation, version 2. 
- * - * see Documentation/dvb/README.dvb-usb for more information + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation, version 2. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -395,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = { .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage }; + +MODULE_AUTHOR("Alan Nisota "); +MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S"); +MODULE_VERSION("1.1"); +MODULE_LICENSE("GPL"); diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 317ef63ee789..8d96a22647b3 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c @@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv) int i; tuner_dbg("%s called\n", __func__); + /* free allocated f/w string */ + if (priv->fname != firmware_name) + kfree(priv->fname); + priv->fname = NULL; + + priv->state = XC2028_NO_FIRMWARE; + memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (!priv->firm) return; @@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv) priv->firm = NULL; priv->firm_size = 0; - priv->state = XC2028_NO_FIRMWARE; - - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); } static int load_all_firmwares(struct dvb_frontend *fe, @@ -884,9 +889,8 @@ read_not_reliable: return 0; fail: - priv->state = XC2028_NO_FIRMWARE; + free_firmware(priv); - memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); if (retry_count < 8) { msleep(50); retry_count++; @@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) mutex_lock(&xc2028_list_mutex); /* only perform final cleanup if this is the last instance */ - if (hybrid_tuner_report_instance_count(priv) == 1) { + if (hybrid_tuner_report_instance_count(priv) == 1) free_firmware(priv); - kfree(priv->ctrl.fname); - priv->ctrl.fname = NULL; - } if (priv) hybrid_tuner_release_state(priv); @@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) /* * Copy the config data. - * For the firmware name, keep a local copy of the string, - * in order to avoid troubles during device release. */ - kfree(priv->ctrl.fname); - priv->ctrl.fname = NULL; memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); - if (p->fname) { - priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); - if (priv->ctrl.fname == NULL) { - rc = -ENOMEM; - goto unlock; - } - } /* * If firmware name changed, frees firmware. 
As free_firmware will @@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) if (priv->state == XC2028_NO_FIRMWARE) { if (!firmware_name[0]) - priv->fname = priv->ctrl.fname; + priv->fname = kstrdup(p->fname, GFP_KERNEL); else priv->fname = firmware_name; + if (!priv->fname) { + rc = -ENOMEM; + goto unlock; + } + rc = request_firmware_nowait(THIS_MODULE, 1, priv->fname, priv->i2c_props.adap->dev.parent, diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 3228fd182a99..9ff243970e93 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = { .properties = apl_i2c_properties, }; -static const struct intel_lpss_platform_info kbl_info = { - .clk_rate = 120000000, -}; - -static const struct intel_lpss_platform_info kbl_uart_info = { - .clk_rate = 120000000, - .clk_con_id = "baudclk", -}; - -static const struct intel_lpss_platform_info kbl_i2c_info = { - .clk_rate = 133000000, -}; - static const struct pci_device_id intel_lpss_pci_ids[] = { /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, @@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, /* KBL-H */ - { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, - { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, - { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, - { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, - { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, - { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 41b113875d64..70c646b0097d 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev) for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) lpss->priv_ctx[i] = readl(lpss->priv + i * 4); - /* Put the device into reset state */ - writel(0, lpss->priv + LPSS_PRIV_RESETS); - return 0; } EXPORT_SYMBOL_GPL(intel_lpss_suspend); diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index 43e54b7e908f..f9a8c5203873 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c @@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 { BXTWC_THRM2_IRQ, BXTWC_BCU_IRQ, BXTWC_ADC_IRQ, + BXTWC_USBC_IRQ, BXTWC_CHGR0_IRQ, BXTWC_CHGR1_IRQ, BXTWC_GPIO0_IRQ, @@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = { 
REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), - REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), + REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)), + REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), @@ -146,7 +148,7 @@ static struct resource adc_resources[] = { }; static struct resource usbc_resources[] = { - DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), + DEFINE_RES_IRQ(BXTWC_USBC_IRQ), }; static struct resource charger_resources[] = { diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 3ac486a597f3..c57e407020f1 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) clones[i]); } + put_device(dev); + return 0; } EXPORT_SYMBOL(mfd_clone_cell); diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index cfdae8a3d779..b0c7bcdaf5df 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c @@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe) if (ret < 0) return ret; + msleep(10); + timeout = jiffies + msecs_to_jiffies(100); while (time_before(jiffies, timeout)) { ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 2f2225e845ef..b93fe4c4957a 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -73,8 +73,10 @@ static struct syscon *of_syscon_register(struct device_node *np) /* Parse the device's DT node for an endianness specification */ if (of_property_read_bool(np, "big-endian")) syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; - else if (of_property_read_bool(np, "little-endian")) + else if (of_property_read_bool(np, "little-endian")) syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; + else if (of_property_read_bool(np, "native-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE; /* * search for reg-io-width property in DT. If it is not provided, diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 7eec619a6023..8588dbad3301 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -393,8 +393,13 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq) BUG(); goto err; } - - ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies, + + /* + * Can't use devres helper here as some of the supplies are provided by + * wm8994->dev's children (regulators) and those regulators are + * unregistered by the devres core before the supplies are freed. 
+ */ + ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); @@ -405,7 +410,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq) wm8994->supplies); if (ret != 0) { dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); - goto err; + goto err_regulator_free; } ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); @@ -596,6 +601,8 @@ err_irq: err_enable: regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); +err_regulator_free: + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); err: mfd_remove_devices(wm8994->dev); return ret; @@ -604,10 +611,11 @@ err: static void wm8994_device_exit(struct wm8994 *wm8994) { pm_runtime_disable(wm8994->dev); - mfd_remove_devices(wm8994->dev); wm8994_irq_exit(wm8994); regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies); + regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); + mfd_remove_devices(wm8994->dev); } static const struct of_device_id wm8994_of_match[] = { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 50a674be6655..df478ae72e23 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) spin_unlock_irqrestore(&host->irq_lock, irqflags); if (host->dma_ops->start(host, sg_len)) { + host->dma_ops->stop(host); /* We can't do DMA, try PIO for this one */ dev_dbg(host->dev, "%s: fall back to PIO mode for current transfer\n", diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index fb71c866eacc..1bb11e4a9fe5 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host, return ret; } } + /* + * The DAT[3:0] line signal levels and the CMD line signal level are + * not compatible with standard SDHC register. The line signal levels + * DAT[7:0] are at bits 31:24 and the command line signal level is at + * bit 23. All other bits are the same as in the standard SDHC + * register. 
+ */ + if (spec_reg == SDHCI_PRESENT_STATE) { + ret = value & 0x000fffff; + ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; + ret |= (value << 1) & SDHCI_CMD_LVL; + return ret; + } + ret = value; return ret; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 766df17fb7eb..2570455b219a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -73,6 +73,7 @@ #define SDHCI_DATA_LVL_MASK 0x00F00000 #define SDHCI_DATA_LVL_SHIFT 20 #define SDHCI_DATA_0_LVL_MASK 0x00100000 +#define SDHCI_CMD_LVL 0x01000000 #define SDHCI_HOST_CONTROL 0x28 #define SDHCI_CTRL_LED 0x01 diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 3eb7430dffbf..f8ff25c8ee2e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -142,6 +142,9 @@ struct plx_pci_card { #define CTI_PCI_VENDOR_ID 0x12c4 #define CTI_PCI_DEVICE_ID_CRG001 0x0900 +#define MOXA_PCI_VENDOR_ID 0x1393 +#define MOXA_PCI_DEVICE_ID 0x0100 + static void plx_pci_reset_common(struct pci_dev *pdev); static void plx9056_pci_reset_common(struct pci_dev *pdev); static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); @@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = { /* based on PLX9030 */ }; +static struct plx_pci_card_info plx_pci_card_info_moxa = { + "MOXA", 2, + PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, + &plx_pci_reset_common + /* based on PLX9052 */ +}; + static const struct pci_device_id plx_pci_tbl[] = { { /* Adlink PCI-7841/cPCI-7841 */ @@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = { 0, 0, (kernel_ulong_t)&plx_pci_card_info_elcus }, + { + /* moxa */ + MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_moxa + }, { 0,} }; MODULE_DEVICE_TABLE(pci, plx_pci_tbl); diff --git a/drivers/net/can/usb/peak_usb/pcan_ucan.h b/drivers/net/can/usb/peak_usb/pcan_ucan.h index e8fc4952c6b0..2147678f0225 100644 --- a/drivers/net/can/usb/peak_usb/pcan_ucan.h +++ b/drivers/net/can/usb/peak_usb/pcan_ucan.h @@ -43,11 +43,22 @@ struct __packed pucan_command { u16 args[3]; }; +#define PUCAN_TSLOW_BRP_BITS 10 +#define PUCAN_TSLOW_TSGEG1_BITS 8 +#define PUCAN_TSLOW_TSGEG2_BITS 7 +#define PUCAN_TSLOW_SJW_BITS 7 + +#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1) +#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1) +#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1) +#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1) + /* uCAN TIMING_SLOW command fields */ -#define PUCAN_TSLOW_SJW_T(s, t) (((s) & 0xf) | ((!!(t)) << 7)) -#define PUCAN_TSLOW_TSEG2(t) ((t) & 0xf) -#define PUCAN_TSLOW_TSEG1(t) ((t) & 0x3f) -#define PUCAN_TSLOW_BRP(b) ((b) & 0x3ff) +#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \ + ((!!(t)) << 7)) +#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK) +#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK) +#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK) struct __packed pucan_timing_slow { __le16 opcode_channel; @@ -60,11 +71,21 @@ struct __packed pucan_timing_slow { __le16 brp; /* BaudRate Prescaler */ }; +#define PUCAN_TFAST_BRP_BITS 10 +#define PUCAN_TFAST_TSGEG1_BITS 5 +#define PUCAN_TFAST_TSGEG2_BITS 4 +#define PUCAN_TFAST_SJW_BITS 4 + +#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1) +#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1) +#define PUCAN_TFAST_TSEG2_MASK 
((1 << PUCAN_TFAST_TSGEG2_BITS) - 1) +#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1) + /* uCAN TIMING_FAST command fields */ -#define PUCAN_TFAST_SJW(s) ((s) & 0x3) -#define PUCAN_TFAST_TSEG2(t) ((t) & 0x7) -#define PUCAN_TFAST_TSEG1(t) ((t) & 0xf) -#define PUCAN_TFAST_BRP(b) ((b) & 0x3ff) +#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK) +#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK) +#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK) +#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK) struct __packed pucan_timing_fast { __le16 opcode_channel; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index c06382cdfdfe..f3141ca56bc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -39,6 +39,7 @@ static struct usb_device_id peak_usb_table[] = { {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID)}, {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID)}, {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID)}, + {USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID)}, {} /* Terminating entry */ }; @@ -50,6 +51,7 @@ static const struct peak_usb_adapter *const peak_usb_adapters_list[] = { &pcan_usb_pro, &pcan_usb_fd, &pcan_usb_pro_fd, + &pcan_usb_x6, }; /* diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 506fe506c9d3..3cbfb069893d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -27,6 +27,7 @@ #define PCAN_USBPRO_PRODUCT_ID 0x000d #define PCAN_USBPROFD_PRODUCT_ID 0x0011 #define PCAN_USBFD_PRODUCT_ID 0x0012 +#define PCAN_USBX6_PRODUCT_ID 0x0014 #define PCAN_USB_DRIVER_NAME "peak_usb" @@ -90,6 +91,7 @@ extern const struct peak_usb_adapter pcan_usb; extern const struct peak_usb_adapter pcan_usb_pro; extern const struct peak_usb_adapter pcan_usb_fd; extern const struct peak_usb_adapter pcan_usb_pro_fd; +extern const struct peak_usb_adapter pcan_usb_x6; struct peak_time_ref { struct timeval tv_host_0, tv_host; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index ce44a033f63b..304732550f0a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -993,24 +993,24 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev) static const struct can_bittiming_const pcan_usb_fd_const = { .name = "pcan_usb_fd", .tseg1_min = 1, - .tseg1_max = 64, + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, - .tseg2_max = 16, - .sjw_max = 16, + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, - .brp_max = 1024, + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_fd_data_const = { .name = "pcan_usb_fd", .tseg1_min = 1, - .tseg1_max = 16, + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, - .tseg2_max = 8, - .sjw_max = 4, + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, - .brp_max = 1024, + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; @@ -1065,24 +1065,24 @@ const struct peak_usb_adapter pcan_usb_fd = { static const struct can_bittiming_const pcan_usb_pro_fd_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, - .tseg1_max = 64, + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, - .tseg2_max = 16, - .sjw_max = 16, + 
.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, - .brp_max = 1024, + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, - .tseg1_max = 16, + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, - .tseg2_max = 8, - .sjw_max = 4, + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, - .brp_max = 1024, + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; @@ -1132,3 +1132,75 @@ const struct peak_usb_adapter pcan_usb_pro_fd = { .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; + +/* describes the PCAN-USB X6 adapter */ +static const struct can_bittiming_const pcan_usb_x6_const = { + .name = "pcan_usb_x6", + .tseg1_min = 1, + .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), + .tseg2_min = 1, + .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), + .brp_min = 1, + .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), + .brp_inc = 1, +}; + +static const struct can_bittiming_const pcan_usb_x6_data_const = { + .name = "pcan_usb_x6", + .tseg1_min = 1, + .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), + .tseg2_min = 1, + .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), + .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), + .brp_min = 1, + .brp_max = (1 << PUCAN_TFAST_BRP_BITS), + .brp_inc = 1, +}; + +const struct peak_usb_adapter pcan_usb_x6 = { + .name = "PCAN-USB X6", + .device_id = PCAN_USBX6_PRODUCT_ID, + .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, + .ctrlmode_supported = CAN_CTRLMODE_FD | + CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY, + .clock = { + .freq = PCAN_UFD_CRYSTAL_HZ, + }, + .bittiming_const = &pcan_usb_x6_const, + .data_bittiming_const = &pcan_usb_x6_data_const, + + /* size of device private data */ + .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), + + /* timestamps usage */ + .ts_used_bits = 32, + .ts_period = 1000000, /* calibration period in ts. 
*/ + .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ + .us_per_ts_shift = 0, + + /* give here messages in/out endpoints */ + .ep_msg_in = PCAN_USBPRO_EP_MSGIN, + .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, + + /* size of rx/tx usb buffers */ + .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, + .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, + + /* device callbacks */ + .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ + .dev_init = pcan_usb_fd_init, + + .dev_exit = pcan_usb_fd_exit, + .dev_free = pcan_usb_fd_free, + .dev_set_bus = pcan_usb_fd_set_bus, + .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, + .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, + .dev_decode_buf = pcan_usb_fd_decode_buf, + .dev_start = pcan_usb_fd_start, + .dev_stop = pcan_usb_fd_stop, + .dev_restart_async = pcan_usb_fd_restart_async, + .dev_encode_msg = pcan_usb_fd_encode_msg, + + .do_get_berr_counter = pcan_usb_fd_get_berr_counter, +}; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7717b19dc806..947adda3397d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, vl->members |= BIT(port) | BIT(cpu_port); if (untagged) - vl->untag |= BIT(port) | BIT(cpu_port); + vl->untag |= BIT(port); else - vl->untag &= ~(BIT(port) | BIT(cpu_port)); + vl->untag &= ~BIT(port); + vl->untag &= ~BIT(cpu_port); b53_set_vlan_entry(dev, vid, vl); b53_fast_age_vlan(dev, vid); @@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, if (pvid) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), vlan->vid_end); - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), - vlan->vid_end); b53_fast_age_vlan(dev, vid); } } @@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - unsigned int cpu_port = dev->cpu_port; struct b53_vlan *vl; u16 vid; u16 pvid; @@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, b53_get_vlan_entry(dev, vid, vl); vl->members &= ~BIT(port); - if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) - vl->members = 0; if (pvid == vid) { if (is5325(dev) || is5365(dev)) @@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, pvid = 0; } - if (untagged) { + if (untagged) vl->untag &= ~(BIT(port)); - if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) - vl->untag = 0; - } b53_set_vlan_entry(dev, vid, vl); b53_fast_age_vlan(dev, vid); } b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); - b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); b53_fast_age_vlan(dev, pvid); return 0; diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e3ee27ce13dd..9ec33b51a0ed 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -588,6 +588,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct ethtool_eee *p = &priv->port_sts[port].eee; u32 id_mode_dis = 0, port_mode; const char *str = NULL; u32 reg; @@ -662,6 +663,9 @@ force_link: reg |= DUPLX_MODE; core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); + + if (!phydev->is_pseudo_fixed_link) + p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); } static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, diff 
--git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index bda31f308cc2..a0eee7218695 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -400,12 +400,6 @@ static int tse_rx(struct altera_tse_private *priv, int limit) skb_put(skb, pktlength); - /* make cache consistent with receive packet buffer */ - dma_sync_single_for_cpu(priv->device, - priv->rx_ring[entry].dma_addr, - priv->rx_ring[entry].len, - DMA_FROM_DEVICE); - dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr, priv->rx_ring[entry].len, DMA_FROM_DEVICE); @@ -469,7 +463,6 @@ static int tse_tx_complete(struct altera_tse_private *priv) if (unlikely(netif_queue_stopped(priv->dev) && tse_tx_avail(priv) > TSE_TX_THRESH(priv))) { - netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && tse_tx_avail(priv) > TSE_TX_THRESH(priv)) { if (netif_msg_tx_done(priv)) @@ -477,7 +470,6 @@ static int tse_tx_complete(struct altera_tse_private *priv) __func__); netif_wake_queue(priv->dev); } - netif_tx_unlock(priv->dev); } spin_unlock(&priv->tx_lock); @@ -592,10 +584,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) buffer->dma_addr = dma_addr; buffer->len = nopaged_len; - /* Push data out of the cache hierarchy into main memory */ - dma_sync_single_for_device(priv->device, buffer->dma_addr, - buffer->len, DMA_TO_DEVICE); - priv->dmaops->tx_buffer(priv, buffer); skb_tx_timestamp(skb); @@ -819,6 +807,8 @@ static int init_phy(struct net_device *dev) if (!phydev) { netdev_err(dev, "Could not find the PHY\n"); + if (fixed_link) + of_phy_deregister_fixed_link(priv->device->of_node); return -ENODEV; } @@ -1545,10 +1535,15 @@ err_free_netdev: static int altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct altera_tse_private *priv = netdev_priv(ndev); - if (ndev->phydev) + if (ndev->phydev) { phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(priv->device->of_node)) + of_phy_deregister_fixed_link(priv->device->of_node); + } + platform_set_drvdata(pdev, NULL); altera_tse_mdio_destroy(ndev); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 9de078819aa6..4f7635178200 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -829,7 +829,7 @@ static int xgbe_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int xgbe_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); @@ -874,7 +874,7 @@ static int xgbe_resume(struct device *dev) return ret; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_ACPI static const struct acpi_device_id xgbe_acpi_match[] = { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c481f104a8fe..5390ae89136c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } -static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) -{ - u32 data = 0x7777; - - xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); - xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); - 
xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); -} - void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, struct xgene_enet_pdata *pdata, enum xgene_enet_err_code status) @@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, - .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8456337a237d..06e598c8bc16 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -55,8 +55,10 @@ enum xgene_enet_rm { #define PREFETCH_BUF_EN BIT(21) #define CSR_RING_ID_BUF 0x000c #define CSR_PBM_COAL 0x0014 +#define CSR_PBM_CTICK0 0x0018 #define CSR_PBM_CTICK1 0x001c #define CSR_PBM_CTICK2 0x0020 +#define CSR_PBM_CTICK3 0x0024 #define CSR_THRESHOLD0_SET1 0x0030 #define CSR_THRESHOLD1_SET1 0x0034 #define CSR_RING_NE_INT_MODE 0x017c diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 429f18fc5503..8158d4698734 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); } - pdata->ring_ops->coalesce(pdata->tx_ring[0]); + if (pdata->ring_ops->coalesce) + pdata->ring_ops->coalesce(pdata->tx_ring[0]); pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; return 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 2b76732add5d..af51dd5844ce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c @@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); ring_cfg[3] |= SET_BIT(X2_DEQINTEN); } - ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); + ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); addr >>= 8; ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); @@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) { - u32 data = 0x7777; + u32 data = 0x77777777; xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); - xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); - xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); - xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); } struct xgene_ring_ops xgene_ring2_ops = { diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b0da9693f28a..be865b4dada2 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) if (ndev->flags & IFF_ALLMULTI) { arc_reg_set(priv, R_LAFL, ~0); arc_reg_set(priv, R_LAFH, ~0); - } else { + } else if (ndev->flags & IFF_MULTICAST) { struct netdev_hw_addr *ha; unsigned int filter[2] = { 0, 0 }; int bit; @@ -472,6 +472,9 @@ static void 
arc_emac_set_rx_mode(struct net_device *ndev) arc_reg_set(priv, R_LAFL, filter[0]); arc_reg_set(priv, R_LAFH, filter[1]); + } else { + arc_reg_set(priv, R_LAFL, 0); + arc_reg_set(priv, R_LAFH, 0); } } } @@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; ndev->watchdog_timeo = TX_TIMEOUT; - /* FIXME :: no multicast support yet */ - ndev->flags &= ~IFF_MULTICAST; priv = netdev_priv(ndev); priv->dev = dev; diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 00c38bf151e6..e078d8da978c 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -1466,12 +1466,12 @@ static int nb8800_probe(struct platform_device *pdev) ret = nb8800_hw_init(dev); if (ret) - goto err_free_bus; + goto err_deregister_fixed_link; if (ops && ops->init) { ret = ops->init(dev); if (ret) - goto err_free_bus; + goto err_deregister_fixed_link; } dev->netdev_ops = &nb8800_netdev_ops; @@ -1504,6 +1504,9 @@ static int nb8800_probe(struct platform_device *pdev) err_free_dma: nb8800_dma_free(dev); +err_deregister_fixed_link: + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); err_free_bus: of_node_put(priv->phy_node); mdiobus_unregister(bus); @@ -1521,6 +1524,8 @@ static int nb8800_remove(struct platform_device *pdev) struct nb8800_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); of_node_put(priv->phy_node); mdiobus_unregister(priv->mii_bus); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index c3354b9941d1..25d1eb4933d0 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1755,13 +1755,13 @@ static int bcm_sysport_probe(struct platform_device *pdev) if (priv->irq0 <= 0 || priv->irq1 <= 0) { dev_err(&pdev->dev, "invalid interrupts\n"); ret = -EINVAL; - goto err; + goto err_free_netdev; } priv->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); - goto err; + goto err_free_netdev; } priv->netdev = dev; @@ -1779,7 +1779,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) ret = of_phy_register_fixed_link(dn); if (ret) { dev_err(&pdev->dev, "failed to register fixed PHY\n"); - goto err; + goto err_free_netdev; } priv->phy_dn = dn; @@ -1821,7 +1821,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) ret = register_netdev(dev); if (ret) { dev_err(&pdev->dev, "failed to register net_device\n"); - goto err; + goto err_deregister_fixed_link; } priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; @@ -1832,7 +1832,11 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv->base, priv->irq0, priv->irq1, txq, rxq); return 0; -err: + +err_deregister_fixed_link: + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); +err_free_netdev: free_netdev(dev); return ret; } @@ -1840,11 +1844,14 @@ err: static int bcm_sysport_remove(struct platform_device *pdev) { struct net_device *dev = dev_get_drvdata(&pdev->dev); + struct device_node *dn = pdev->dev.of_node; /* Not much to do, ndo_close has been called * and we use managed allocations */ unregister_netdev(dev); + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); free_netdev(dev); dev_set_drvdata(&pdev->dev, NULL); diff --git 
a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 31ca204b38d2..49f4cafe5438 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); + + /* preserve ONLY bits 16-17 from current hardware value */ + ctl &= BGMAC_DMA_RX_ADDREXT_MASK; + if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { ctl &= ~BGMAC_DMA_RX_BL_MASK; ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; @@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, ctl &= ~BGMAC_DMA_RX_PT_MASK; ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; } - ctl &= BGMAC_DMA_RX_ADDREXT_MASK; ctl |= BGMAC_DMA_RX_ENABLE; ctl |= BGMAC_DMA_RX_PARITY_DISABLE; ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; @@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac) mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; - if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); - if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) + if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) bgmac_cco_ctl_maskset(bgmac, 1, ~0, BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index b3791b394715..1f7034d739b0 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -49,6 +49,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_CNIC) #define BCM_CNIC 1 @@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp) BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); } -static int -bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +static void +bnx2_wait_dma_complete(struct bnx2 *bp) { u32 val; - int i, rc = 0; - u8 old_port; + int i; - /* Wait for the current PCI transaction to complete before - * issuing a reset. */ + /* + * Wait for the current PCI transaction to complete before + * issuing a reset. + */ if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, @@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) } } + return; +} + + +static int +bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) +{ + u32 val; + int i, rc = 0; + u8 old_port; + + /* Wait for the current PCI transaction to complete before + * issuing a reset. */ + bnx2_wait_dma_complete(bp); + /* Wait for the firmware to tell us it is ok to issue a reset. */ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); @@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev) struct bnx2 *bp = netdev_priv(dev); int rc; + rc = bnx2_request_firmware(bp); + if (rc < 0) + goto out; + netif_carrier_off(dev); bnx2_disable_int(bp); @@ -6429,6 +6450,7 @@ open_err: bnx2_free_irq(bp); bnx2_free_mem(bp); bnx2_del_napi(bp); + bnx2_release_firmware(bp); goto out; } @@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); - rc = bnx2_request_firmware(bp); - if (rc < 0) - goto error; + /* + * In-flight DMA from 1st kernel could continue going in kdump kernel. + * New io-page table has been created before bnx2 does reset at open stage. 
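This bnx2 change guards against a kdump-specific hazard: the capture kernel boots while DMA programmed by the crashed kernel may still be in flight, and the freshly created IO page tables must not be hit by those stale accesses. A minimal sketch of the guard, assuming only what the hunk itself introduces (is_kdump_kernel() from the new <linux/crash_dump.h> include, and bnx2_wait_dma_complete() factored out of bnx2_reset_chip()); the wrapper name is illustrative:

#include <linux/crash_dump.h>

static void example_bnx2_quiesce(struct bnx2 *bp)
{
	/*
	 * Only a capture kernel inherits in-flight DMA; a normal boot
	 * starts from a quiesced device, so the wait is skipped there.
	 */
	if (is_kdump_kernel())
		bnx2_wait_dma_complete(bp);
}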
+ * We have to wait for the in-flight DMA to complete to avoid it look up + * into the newly created io-page table. + */ + if (is_kdump_kernel()) + bnx2_wait_dma_complete(bp); - - bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; error: - bnx2_release_firmware(bp); pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a9f9f3738022..ee1a803aa11a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1811,6 +1811,9 @@ static int bnxt_busy_poll(struct napi_struct *napi) if (atomic_read(&bp->intr_sem) != 0) return LL_FLUSH_FAILED; + if (!bp->link_info.link_up) + return LL_FLUSH_FAILED; + if (!bnxt_lock_poll(bnapi)) return LL_FLUSH_BUSY; @@ -3210,11 +3213,17 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, goto err_out; } - if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) + switch (tunnel_type) { + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; - - else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) + break; + case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; + break; + default: + break; + } + err_out: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4934,6 +4943,10 @@ static void bnxt_del_napi(struct bnxt *bp) napi_hash_del(&bnapi->napi); netif_napi_del(&bnapi->napi); } + /* We called napi_hash_del() before netif_napi_del(), we need + * to respect an RCU grace period before freeing napi structures. + */ + synchronize_net(); } static void bnxt_init_napi(struct bnxt *bp) @@ -6309,6 +6322,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *ntc) { struct bnxt *bp = netdev_priv(dev); + bool sh = false; u8 tc; if (ntc->type != TC_SETUP_MQPRIO) @@ -6325,12 +6339,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (netdev_get_num_tc(dev) == tc) return 0; + if (bp->flags & BNXT_FLAG_SHARED_RINGS) + sh = true; + if (tc) { int max_rx_rings, max_tx_rings, rc; - bool sh = false; - - if (bp->flags & BNXT_FLAG_SHARED_RINGS) - sh = true; rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) @@ -6348,7 +6361,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, bp->tx_nr_rings = bp->tx_nr_rings_per_tc; netdev_reset_tc(dev); } - bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); + bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : + bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; if (netif_running(bp->dev)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ec6cd18842c3..60e2af8678bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) if (vf->flags & BNXT_VF_LINK_UP) { /* if physical link is down, force link up on VF */ - if (phy_qcfg_resp.link == - PORT_PHY_QCFG_RESP_LINK_NO_LINK) { + if (phy_qcfg_resp.link != + PORT_PHY_QCFG_RESP_LINK_LINK) { phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_LINK; phy_qcfg_resp.link_speed = cpu_to_le16( diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 4464bc5db934..a4e60e56c14f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1172,6 +1172,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; unsigned int pkts_compl = 0; @@ -1199,13 +1200,13 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, if (tx_cb_ptr->skb) { pkts_compl++; bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; - dma_unmap_single(&dev->dev, + dma_unmap_single(kdev, dma_unmap_addr(tx_cb_ptr, dma_addr), dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); bcmgenet_free_cb(tx_cb_ptr); } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { - dma_unmap_page(&dev->dev, + dma_unmap_page(kdev, dma_unmap_addr(tx_cb_ptr, dma_addr), dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); @@ -1775,6 +1776,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) { + struct device *kdev = &priv->pdev->dev; struct enet_cb *cb; int i; @@ -1782,7 +1784,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) cb = &priv->rx_cbs[i]; if (dma_unmap_addr(cb, dma_addr)) { - dma_unmap_single(&priv->dev->dev, + dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), priv->rx_buf_len, DMA_FROM_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 457c3bc8cfff..e87607621e62 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -542,8 +542,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) /* Make sure we initialize MoCA PHYs with a link down */ if (phy_mode == PHY_INTERFACE_MODE_MOCA) { phydev = of_phy_find_device(dn); - if (phydev) + if (phydev) { phydev->link = 0; + put_device(&phydev->mdio.dev); + } } return 0; @@ -625,6 +627,7 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; int ret; ret = bcmgenet_mii_alloc(priv); @@ -638,6 +641,8 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); @@ -647,7 +652,10 @@ out: void bcmgenet_mii_exit(struct 
net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *dn = priv->pdev->dev.of_node; + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f9df4b5ae90e..f42f672b0e7e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) return 0; hw_cons = *(tcb->hw_consumer_index); + rmb(); cons = tcb->consumer_index; q_depth = tcb->q_depth; @@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) BNA_QE_INDX_INC(prod, q_depth); tcb->producer_index = prod; - smp_mb(); + wmb(); if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) return NETDEV_TX_OK; @@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) skb_tx_timestamp(skb); bna_txq_prod_indx_doorbell(tcb); - smp_mb(); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index b32444a3ed79..ec09fcece711 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -975,6 +975,7 @@ static inline void macb_init_rx_ring(struct macb *bp) addr += bp->rx_buffer_size; } bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); + bp->rx_tail = 0; } static int macb_rx(struct macb *bp, int budget) @@ -1156,6 +1157,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(RXUBR)) { ctrl = macb_readl(bp, NCR); macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); + wmb(); macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) @@ -1616,8 +1618,6 @@ static void macb_init_rings(struct macb *bp) bp->queues[0].tx_head = 0; bp->queues[0].tx_tail = 0; bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); - - bp->rx_tail = 0; } static void macb_reset_hw(struct macb *bp) @@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->skb_length = skb->len; lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(NULL, lp->skb_physaddr)) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + netdev_err(dev, "%s: DMA mapping error\n", __func__); + return NETDEV_TX_OK; + } /* Set address of the data in the Transmit Address register */ macb_writel(lp, TAR, lp->skb_physaddr); @@ -2764,6 +2770,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id) if (intstatus & MACB_BIT(RXUBR)) { ctl = macb_readl(lp, NCR); macb_writel(lp, NCR, ctl & ~MACB_BIT(RE)); + wmb(); macb_writel(lp, NCR, ctl | MACB_BIT(RE)); } diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 30426109711c..86bd93ce2ea3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -47,7 +47,7 @@ /* Min/Max packet size */ #define NIC_HW_MIN_FRS 64 -#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ +#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */ /* Max pkinds */ #define NIC_MAX_PKIND 16 @@ -178,11 +178,11 @@ enum tx_stats_reg_offset { struct nicvf_hw_stats { u64 rx_bytes; + u64 rx_frames; u64 rx_ucast_frames; u64 rx_bcast_frames; u64 rx_mcast_frames; - u64 rx_fcs_errors; - u64 rx_l2_errors; + u64 rx_drops; u64 rx_drop_red; u64 
rx_drop_red_bytes; u64 rx_drop_overrun; @@ -191,6 +191,19 @@ struct nicvf_hw_stats { u64 rx_drop_mcast; u64 rx_drop_l3_bcast; u64 rx_drop_l3_mcast; + u64 rx_fcs_errors; + u64 rx_l2_errors; + + u64 tx_bytes; + u64 tx_frames; + u64 tx_ucast_frames; + u64 tx_bcast_frames; + u64 tx_mcast_frames; + u64 tx_drops; +}; + +struct nicvf_drv_stats { + /* CQE Rx errs */ u64 rx_bgx_truncated_pkts; u64 rx_jabber_errs; u64 rx_fcs_errs; @@ -216,34 +229,30 @@ struct nicvf_hw_stats { u64 rx_l4_pclp; u64 rx_truncated_pkts; - u64 tx_bytes_ok; - u64 tx_ucast_frames_ok; - u64 tx_bcast_frames_ok; - u64 tx_mcast_frames_ok; - u64 tx_drops; -}; - -struct nicvf_drv_stats { - /* Rx */ - u64 rx_frames_ok; - u64 rx_frames_64; - u64 rx_frames_127; - u64 rx_frames_255; - u64 rx_frames_511; - u64 rx_frames_1023; - u64 rx_frames_1518; - u64 rx_frames_jumbo; - u64 rx_drops; + /* CQE Tx errs */ + u64 tx_desc_fault; + u64 tx_hdr_cons_err; + u64 tx_subdesc_err; + u64 tx_max_size_exceeded; + u64 tx_imm_size_oflow; + u64 tx_data_seq_err; + u64 tx_mem_seq_err; + u64 tx_lock_viol; + u64 tx_data_fault; + u64 tx_tstmp_conflict; + u64 tx_tstmp_timeout; + u64 tx_mem_fault; + u64 tx_csum_overlap; + u64 tx_csum_overflow; + /* driver debug stats */ u64 rcv_buffer_alloc_failures; - - /* Tx */ - u64 tx_frames_ok; - u64 tx_drops; u64 tx_tso; u64 tx_timeout; u64 txq_stop; u64 txq_wake; + + struct u64_stats_sync syncp; }; struct nicvf { @@ -282,7 +291,6 @@ struct nicvf { u8 node; u8 cpi_alg; - u16 mtu; bool link_up; u8 duplex; u32 speed; @@ -298,7 +306,7 @@ struct nicvf { /* Stats */ struct nicvf_hw_stats hw_stats; - struct nicvf_drv_stats drv_stats; + struct nicvf_drv_stats __percpu *drv_stats; struct bgx_stats bgx_stats; /* MSI-X */ diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 2bbf4cbf08b2..6677b96e1f3f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "nic_reg.h" #include "nic.h" @@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) /* Update hardware min/max frame size */ static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) { - if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { - dev_err(&nic->pdev->dev, - "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", - vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); - return 1; - } - new_frs += ETH_HLEN; - if (new_frs <= nic->pkind.maxlen) - return 0; + int bgx, lmac, lmac_cnt; + u64 lmac_credits; - nic->pkind.maxlen = new_frs; - nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); + if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) + return 1; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac += bgx * MAX_LMAC_PER_BGX; + + new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4; + + /* Update corresponding LMAC credits */ + lmac_cnt = bgx_get_lmac_count(nic->node, bgx); + lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8)); + lmac_credits &= ~(0xFFFFFULL << 12); + lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12); + nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits); + + /* Enforce MTU in HW + * This config is supported only from 88xx pass 2.0 onwards. 
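The credit arithmetic in nic_update_hw_frs() above can be checked by hand: the VF's MTU is padded out to a full on-wire frame (Ethernet header plus one VLAN tag, FCS, and 4 extra bytes, presumably headroom for a second tag; the hunk only shows the constant), and the per-LMAC credit field assumes a 48 KB TX FIFO shared evenly by the LMACs of one BGX, expressed in 16-byte units at bits 12..31. A hedged, standalone illustration of that math (names and the main() harness are illustrative only):

#include <stdio.h>

#define TX_FIFO_BYTES	(48 * 1024)
#define VLAN_ETH_HLEN	18	/* 14-byte Ethernet header + one 4-byte VLAN tag */
#define ETH_FCS_LEN	4

static unsigned long long lmac_credit_field(int mtu, int lmac_cnt)
{
	int frs = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;

	/* Credits are counted in 16-byte units and live in bits 12..31. */
	return (unsigned long long)(((TX_FIFO_BYTES / lmac_cnt) - frs) / 16) << 12;
}

int main(void)
{
	/* MTU 1500 on a 4-LMAC BGX: frs = 1526, (12288 - 1526) / 16 = 672. */
	printf("credit field = 0x%llx\n", lmac_credit_field(1500, 4));
	return 0;
}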
+ */ + if (!pass1_silicon(nic->pdev)) + nic_reg_write(nic, + NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs); return 0; } @@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic) /* PKIND configuration */ nic->pkind.minlen = 0; - nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; + nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4; nic->pkind.lenerr_en = 1; nic->pkind.rx_hdr = 0; nic->pkind.hdr_sl = 0; @@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic, nic_reg_write(nic, reg_addr, 0); } } + return 0; } diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index edf779f5a227..80d46337cf29 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h @@ -106,6 +106,7 @@ #define NIC_PF_MPI_0_2047_CFG (0x210000) #define NIC_PF_RSSI_0_4097_RQ (0x220000) #define NIC_PF_LMAC_0_7_CFG (0x240000) +#define NIC_PF_LMAC_0_7_CFG2 (0x240100) #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) #define NIC_PF_LMAC_0_7_CREDIT (0x244000) #define NIC_PF_CHAN_0_255_TX_CFG (0x400000) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index ad4fddb55421..432bf6be57cb 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -36,11 +36,11 @@ struct nicvf_stat { static const struct nicvf_stat nicvf_hw_stats[] = { NICVF_HW_STAT(rx_bytes), + NICVF_HW_STAT(rx_frames), NICVF_HW_STAT(rx_ucast_frames), NICVF_HW_STAT(rx_bcast_frames), NICVF_HW_STAT(rx_mcast_frames), - NICVF_HW_STAT(rx_fcs_errors), - NICVF_HW_STAT(rx_l2_errors), + NICVF_HW_STAT(rx_drops), NICVF_HW_STAT(rx_drop_red), NICVF_HW_STAT(rx_drop_red_bytes), NICVF_HW_STAT(rx_drop_overrun), @@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = { NICVF_HW_STAT(rx_drop_mcast), NICVF_HW_STAT(rx_drop_l3_bcast), NICVF_HW_STAT(rx_drop_l3_mcast), - NICVF_HW_STAT(rx_bgx_truncated_pkts), - NICVF_HW_STAT(rx_jabber_errs), - NICVF_HW_STAT(rx_fcs_errs), - NICVF_HW_STAT(rx_bgx_errs), - NICVF_HW_STAT(rx_prel2_errs), - NICVF_HW_STAT(rx_l2_hdr_malformed), - NICVF_HW_STAT(rx_oversize), - NICVF_HW_STAT(rx_undersize), - NICVF_HW_STAT(rx_l2_len_mismatch), - NICVF_HW_STAT(rx_l2_pclp), - NICVF_HW_STAT(rx_ip_ver_errs), - NICVF_HW_STAT(rx_ip_csum_errs), - NICVF_HW_STAT(rx_ip_hdr_malformed), - NICVF_HW_STAT(rx_ip_payload_malformed), - NICVF_HW_STAT(rx_ip_ttl_errs), - NICVF_HW_STAT(rx_l3_pclp), - NICVF_HW_STAT(rx_l4_malformed), - NICVF_HW_STAT(rx_l4_csum_errs), - NICVF_HW_STAT(rx_udp_len_errs), - NICVF_HW_STAT(rx_l4_port_errs), - NICVF_HW_STAT(rx_tcp_flag_errs), - NICVF_HW_STAT(rx_tcp_offset_errs), - NICVF_HW_STAT(rx_l4_pclp), - NICVF_HW_STAT(rx_truncated_pkts), - NICVF_HW_STAT(tx_bytes_ok), - NICVF_HW_STAT(tx_ucast_frames_ok), - NICVF_HW_STAT(tx_bcast_frames_ok), - NICVF_HW_STAT(tx_mcast_frames_ok), + NICVF_HW_STAT(rx_fcs_errors), + NICVF_HW_STAT(rx_l2_errors), + NICVF_HW_STAT(tx_bytes), + NICVF_HW_STAT(tx_frames), + NICVF_HW_STAT(tx_ucast_frames), + NICVF_HW_STAT(tx_bcast_frames), + NICVF_HW_STAT(tx_mcast_frames), + NICVF_HW_STAT(tx_drops), }; static const struct nicvf_stat nicvf_drv_stats[] = { - NICVF_DRV_STAT(rx_frames_ok), - NICVF_DRV_STAT(rx_frames_64), - NICVF_DRV_STAT(rx_frames_127), - NICVF_DRV_STAT(rx_frames_255), - NICVF_DRV_STAT(rx_frames_511), - NICVF_DRV_STAT(rx_frames_1023), - NICVF_DRV_STAT(rx_frames_1518), - NICVF_DRV_STAT(rx_frames_jumbo), - NICVF_DRV_STAT(rx_drops), + 
NICVF_DRV_STAT(rx_bgx_truncated_pkts), + NICVF_DRV_STAT(rx_jabber_errs), + NICVF_DRV_STAT(rx_fcs_errs), + NICVF_DRV_STAT(rx_bgx_errs), + NICVF_DRV_STAT(rx_prel2_errs), + NICVF_DRV_STAT(rx_l2_hdr_malformed), + NICVF_DRV_STAT(rx_oversize), + NICVF_DRV_STAT(rx_undersize), + NICVF_DRV_STAT(rx_l2_len_mismatch), + NICVF_DRV_STAT(rx_l2_pclp), + NICVF_DRV_STAT(rx_ip_ver_errs), + NICVF_DRV_STAT(rx_ip_csum_errs), + NICVF_DRV_STAT(rx_ip_hdr_malformed), + NICVF_DRV_STAT(rx_ip_payload_malformed), + NICVF_DRV_STAT(rx_ip_ttl_errs), + NICVF_DRV_STAT(rx_l3_pclp), + NICVF_DRV_STAT(rx_l4_malformed), + NICVF_DRV_STAT(rx_l4_csum_errs), + NICVF_DRV_STAT(rx_udp_len_errs), + NICVF_DRV_STAT(rx_l4_port_errs), + NICVF_DRV_STAT(rx_tcp_flag_errs), + NICVF_DRV_STAT(rx_tcp_offset_errs), + NICVF_DRV_STAT(rx_l4_pclp), + NICVF_DRV_STAT(rx_truncated_pkts), + + NICVF_DRV_STAT(tx_desc_fault), + NICVF_DRV_STAT(tx_hdr_cons_err), + NICVF_DRV_STAT(tx_subdesc_err), + NICVF_DRV_STAT(tx_max_size_exceeded), + NICVF_DRV_STAT(tx_imm_size_oflow), + NICVF_DRV_STAT(tx_data_seq_err), + NICVF_DRV_STAT(tx_mem_seq_err), + NICVF_DRV_STAT(tx_lock_viol), + NICVF_DRV_STAT(tx_data_fault), + NICVF_DRV_STAT(tx_tstmp_conflict), + NICVF_DRV_STAT(tx_tstmp_timeout), + NICVF_DRV_STAT(tx_mem_fault), + NICVF_DRV_STAT(tx_csum_overlap), + NICVF_DRV_STAT(tx_csum_overflow), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), - NICVF_DRV_STAT(tx_frames_ok), NICVF_DRV_STAT(tx_tso), - NICVF_DRV_STAT(tx_drops), NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), NICVF_DRV_STAT(txq_wake), @@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct nicvf *nic = netdev_priv(netdev); - int stat; - int sqs; + int stat, tmp_stats; + int sqs, cpu; nicvf_update_stats(nic); @@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, for (stat = 0; stat < nicvf_n_hw_stats; stat++) *(data++) = ((u64 *)&nic->hw_stats) [nicvf_hw_stats[stat].index]; - for (stat = 0; stat < nicvf_n_drv_stats; stat++) - *(data++) = ((u64 *)&nic->drv_stats) - [nicvf_drv_stats[stat].index]; + for (stat = 0; stat < nicvf_n_drv_stats; stat++) { + tmp_stats = 0; + for_each_possible_cpu(cpu) + tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu)) + [nicvf_drv_stats[stat].index]; + *(data++) = tmp_stats; + } nicvf_get_qset_stats(nic, stats, &data); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 45a13f718863..8a37012c9c89 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) return qidx; } -static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, - struct sk_buff *skb) -{ - if (skb->len <= 64) - nic->drv_stats.rx_frames_64++; - else if (skb->len <= 127) - nic->drv_stats.rx_frames_127++; - else if (skb->len <= 255) - nic->drv_stats.rx_frames_255++; - else if (skb->len <= 511) - nic->drv_stats.rx_frames_511++; - else if (skb->len <= 1023) - nic->drv_stats.rx_frames_1023++; - else if (skb->len <= 1518) - nic->drv_stats.rx_frames_1518++; - else - nic->drv_stats.rx_frames_jumbo++; -} - /* The Cavium ThunderX network controller can *only* be found in SoCs * containing the ThunderX ARM64 CPU implementation. 
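The ethtool path above now folds per-CPU counters instead of reading one shared struct, and the rest of the series converts every hot-path increment to this_cpu_inc(). A condensed sketch of the pattern with illustrative names (the driver itself allocates with netdev_alloc_pcpu_stats(), which also initialises each CPU's u64_stats_sync; plain alloc_percpu() is used here to keep the sketch short):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_drv_stats {
	u64 tx_tso;
	u64 rcv_buffer_alloc_failures;
};

struct example_nic {
	struct example_drv_stats __percpu *drv_stats;
};

static int example_alloc_stats(struct example_nic *nic)
{
	nic->drv_stats = alloc_percpu(struct example_drv_stats);
	return nic->drv_stats ? 0 : -ENOMEM;
}

static void example_hot_path(struct example_nic *nic)
{
	/* Lock-free: each CPU only ever increments its own copy. */
	this_cpu_inc(nic->drv_stats->tx_tso);
}

static u64 example_read_tx_tso(struct example_nic *nic)
{
	u64 sum = 0;
	int cpu;

	/* Fold the per-CPU copies only on the slow (ethtool/stats) path. */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(nic->drv_stats, cpu)->tx_tso;
	return sum;
}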
All accesses to the device * registers on this platform are implicitly strongly ordered with respect @@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev, static int nicvf_init_resources(struct nicvf *nic) { int err; - union nic_mbx mbx = {}; - - mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; /* Enable Qset */ nicvf_qset_config(nic, true); @@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic) return err; } - /* Send VF config done msg to PF */ - nicvf_write_to_mbx(nic, &mbx); - return 0; } static void nicvf_snd_pkt_handler(struct net_device *netdev, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type, int budget, unsigned int *tx_pkts, unsigned int *tx_bytes) @@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); - nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); + nicvf_check_cqe_tx_errs(nic, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; if (skb) { /* Check for dummy descriptor used for HW TSO offload on 88xx */ @@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, return; } - nicvf_set_rx_frame_cnt(nic, skb); - nicvf_set_rxhash(netdev, cqe_rx, skb); skb_record_rx_queue(skb, rq_idx); @@ -703,7 +675,7 @@ loop: work_done++; break; case CQE_TYPE_SEND: - nicvf_snd_pkt_handler(netdev, cq, + nicvf_snd_pkt_handler(netdev, (void *)cq_desc, CQE_TYPE_SEND, budget, &tx_pkts, &tx_bytes); tx_done++; @@ -740,7 +712,7 @@ done: nic = nic->pnicvf; if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { netif_tx_start_queue(txq); - nic->drv_stats.txq_wake++; + this_cpu_inc(nic->drv_stats->txq_wake); if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit queue wakeup SQ%d\n", @@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { netif_tx_stop_queue(txq); - nic->drv_stats.txq_stop++; + this_cpu_inc(nic->drv_stats->txq_stop); if (netif_msg_tx_err(nic)) netdev_warn(netdev, "%s: Transmit ring full, stopping SQ%d\n", @@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev) return 0; } +static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) +{ + union nic_mbx mbx = {}; + + mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; + mbx.frs.max_frs = mtu; + mbx.frs.vf_id = nic->vf_id; + + return nicvf_send_msg_to_pf(nic, &mbx); +} + int nicvf_open(struct net_device *netdev) { - int err, qidx; + int cpu, err, qidx; struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; struct nicvf_cq_poll *cq_poll = NULL; - - nic->mtu = netdev->mtu; + union nic_mbx mbx = {}; netif_carrier_off(netdev); @@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev) if (nic->sqs_mode) nicvf_get_primary_vf_struct(nic); - /* Configure receive side scaling */ - if (!nic->sqs_mode) + /* Configure receive side scaling and MTU */ + if (!nic->sqs_mode) { nicvf_rss_init(nic); + if (nicvf_update_hw_max_frs(nic, netdev->mtu)) + goto cleanup; + + /* Clear percpu stats */ + for_each_possible_cpu(cpu) + memset(per_cpu_ptr(nic->drv_stats, cpu), 0, + sizeof(struct nicvf_drv_stats)); + } err = nicvf_register_interrupts(nic); if (err) @@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev) for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); - nic->drv_stats.txq_stop = 0; - nic->drv_stats.txq_wake = 0; + /* Send VF config done msg to PF */ + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + 
nicvf_write_to_mbx(nic, &mbx); return 0; cleanup: @@ -1297,17 +1288,6 @@ napi_del: return err; } -static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) -{ - union nic_mbx mbx = {}; - - mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; - mbx.frs.max_frs = mtu; - mbx.frs.vf_id = nic->vf_id; - - return nicvf_send_msg_to_pf(nic, &mbx); -} - static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) { struct nicvf *nic = netdev_priv(netdev); @@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) if (new_mtu < NIC_HW_MIN_FRS) return -EINVAL; + netdev->mtu = new_mtu; + + if (!netif_running(netdev)) + return 0; + if (nicvf_update_hw_max_frs(nic, new_mtu)) return -EINVAL; - netdev->mtu = new_mtu; - nic->mtu = new_mtu; return 0; } @@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic) void nicvf_update_stats(struct nicvf *nic) { - int qidx; + int qidx, cpu; + u64 tmp_stats = 0; struct nicvf_hw_stats *stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; + struct nicvf_drv_stats *drv_stats; struct queue_set *qs = nic->qs; #define GET_RX_STATS(reg) \ @@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic) stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); - stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); - stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); - stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); - stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); + stats->tx_bytes = GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); stats->tx_drops = GET_TX_STATS(TX_DROP); - drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + - stats->tx_bcast_frames_ok + - stats->tx_mcast_frames_ok; - drv_stats->rx_frames_ok = stats->rx_ucast_frames + - stats->rx_bcast_frames + - stats->rx_mcast_frames; - drv_stats->rx_drops = stats->rx_drop_red + - stats->rx_drop_overrun; - drv_stats->tx_drops = stats->tx_drops; + /* On T88 pass 2.0, the dummy SQE added for TSO notification + * via CQE has 'dont_send' set. Hence HW drops the pkt pointed + * pointed by dummy SQE and results in tx_drops counter being + * incremented. Subtracting it from tx_tso counter will give + * exact tx_drops counter. 
+ */ + if (nic->t88 && nic->hw_tso) { + for_each_possible_cpu(cpu) { + drv_stats = per_cpu_ptr(nic->drv_stats, cpu); + tmp_stats += drv_stats->tx_tso; + } + stats->tx_drops = tmp_stats - stats->tx_drops; + } + stats->tx_frames = stats->tx_ucast_frames + + stats->tx_bcast_frames + + stats->tx_mcast_frames; + stats->rx_frames = stats->rx_ucast_frames + + stats->rx_bcast_frames + + stats->rx_mcast_frames; + stats->rx_drops = stats->rx_drop_red + + stats->rx_drop_overrun; /* Update RQ and SQ stats */ for (qidx = 0; qidx < qs->rq_cnt; qidx++) @@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, { struct nicvf *nic = netdev_priv(netdev); struct nicvf_hw_stats *hw_stats = &nic->hw_stats; - struct nicvf_drv_stats *drv_stats = &nic->drv_stats; nicvf_update_stats(nic); stats->rx_bytes = hw_stats->rx_bytes; - stats->rx_packets = drv_stats->rx_frames_ok; - stats->rx_dropped = drv_stats->rx_drops; + stats->rx_packets = hw_stats->rx_frames; + stats->rx_dropped = hw_stats->rx_drops; stats->multicast = hw_stats->rx_mcast_frames; - stats->tx_bytes = hw_stats->tx_bytes_ok; - stats->tx_packets = drv_stats->tx_frames_ok; - stats->tx_dropped = drv_stats->tx_drops; + stats->tx_bytes = hw_stats->tx_bytes; + stats->tx_packets = hw_stats->tx_frames; + stats->tx_dropped = hw_stats->tx_drops; return stats; } @@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev) netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name); - nic->drv_stats.tx_timeout++; + this_cpu_inc(nic->drv_stats->tx_timeout); schedule_work(&nic->reset_task); } @@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_netdev; } + nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); + if (!nic->drv_stats) { + err = -ENOMEM; + goto err_free_netdev; + } + err = nicvf_set_qset_resources(nic); if (err) goto err_free_netdev; @@ -1648,6 +1649,8 @@ err_unregister_interrupts: nicvf_unregister_interrupts(nic); err_free_netdev: pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); err_release_regions: pci_release_regions(pdev); @@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev) unregister_netdev(pnetdev); nicvf_unregister_interrupts(nic); pci_set_drvdata(pdev, NULL); + if (nic->drv_stats) + free_percpu(nic->drv_stats); free_netdev(netdev); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index a4fc50155881..747ef0882976 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order); if (!nic->rb_page) { - nic->drv_stats.rcv_buffer_alloc_failures++; + this_cpu_inc(nic->pnicvf->drv_stats-> + rcv_buffer_alloc_failures); return -ENOMEM; } nic->rb_page_offset = 0; @@ -270,7 +271,8 @@ refill: rbdr_idx, new_rb); next_rbdr: /* Re-enable RBDR interrupts only if buffer allocation is success */ - if (!nic->rb_alloc_fail && rbdr->enable) + if (!nic->rb_alloc_fail && rbdr->enable && + netif_running(nic->pnicvf->netdev)) nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); if (rbdr_idx) @@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic, static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { + 
struct sk_buff *skb; + if (!sq) return; if (!sq->dmem.base) @@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) sq->dmem.q_len * TSO_HEADER_SIZE, sq->tso_hdrs, sq->tso_hdrs_phys); + /* Free pending skbs in the queue */ + smp_rmb(); + while (sq->head != sq->tail) { + skb = (struct sk_buff *)sq->skbuff[sq->head]; + if (skb) + dev_kfree_skb_any(skb); + sq->head++; + sq->head &= (sq->dmem.q_len - 1); + } kfree(sq->skbuff); nicvf_free_q_desc_mem(nic, &sq->dmem); } @@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) { union nic_mbx mbx = {}; - /* Reset all RXQ's stats */ + /* Reset all RQ/SQ and VF stats */ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + mbx.reset_stat.rx_stat_mask = 0x3FFF; + mbx.reset_stat.tx_stat_mask = 0x1F; mbx.reset_stat.rq_stat_mask = 0xFFFF; + mbx.reset_stat.sq_stat_mask = 0xFFFF; nicvf_send_msg_to_pf(nic, &mbx); } @@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); nicvf_send_msg_to_pf(nic, &mbx); - nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); - if (!nic->sqs_mode) + if (!nic->sqs_mode && (qidx == 0)) { + /* Enable checking L3/L4 length and TCP/UDP checksums */ + nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, + (BIT(24) | BIT(23) | BIT(21))); nicvf_config_vlan_stripping(nic, nic->netdev->features); + } /* Enable Receive queue */ memset(&rq_cfg, 0, sizeof(struct rq_cfg)); @@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; /* For non-tunneled pkts, point this to L2 ethertype */ hdr->inner_l3_offset = skb_network_offset(skb) - 2; - nic->drv_stats.tx_tso++; + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); } } @@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); - nic->drv_stats.tx_tso++; + this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); return 1; } @@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { - struct nicvf_hw_stats *stats = &nic->hw_stats; - if (!cqe_rx->err_level && !cqe_rx->err_opcode) return 0; @@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: - stats->rx_bgx_truncated_pkts++; + this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); break; case CQ_RX_ERROP_RE_JABBER: - stats->rx_jabber_errs++; + this_cpu_inc(nic->drv_stats->rx_jabber_errs); break; case CQ_RX_ERROP_RE_FCS: - stats->rx_fcs_errs++; + this_cpu_inc(nic->drv_stats->rx_fcs_errs); break; case CQ_RX_ERROP_RE_RX_CTL: - stats->rx_bgx_errs++; + this_cpu_inc(nic->drv_stats->rx_bgx_errs); break; case CQ_RX_ERROP_PREL2_ERR: - stats->rx_prel2_errs++; + this_cpu_inc(nic->drv_stats->rx_prel2_errs); break; case CQ_RX_ERROP_L2_MAL: - stats->rx_l2_hdr_malformed++; + this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); break; case CQ_RX_ERROP_L2_OVERSIZE: - stats->rx_oversize++; + this_cpu_inc(nic->drv_stats->rx_oversize); break; case CQ_RX_ERROP_L2_UNDERSIZE: - stats->rx_undersize++; + this_cpu_inc(nic->drv_stats->rx_undersize); break; case CQ_RX_ERROP_L2_LENMISM: - stats->rx_l2_len_mismatch++; + this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); break; case CQ_RX_ERROP_L2_PCLP: - stats->rx_l2_pclp++; 
+ this_cpu_inc(nic->drv_stats->rx_l2_pclp); break; case CQ_RX_ERROP_IP_NOT: - stats->rx_ip_ver_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); break; case CQ_RX_ERROP_IP_CSUM_ERR: - stats->rx_ip_csum_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); break; case CQ_RX_ERROP_IP_MAL: - stats->rx_ip_hdr_malformed++; + this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); break; case CQ_RX_ERROP_IP_MALD: - stats->rx_ip_payload_malformed++; + this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); break; case CQ_RX_ERROP_IP_HOP: - stats->rx_ip_ttl_errs++; + this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); break; case CQ_RX_ERROP_L3_PCLP: - stats->rx_l3_pclp++; + this_cpu_inc(nic->drv_stats->rx_l3_pclp); break; case CQ_RX_ERROP_L4_MAL: - stats->rx_l4_malformed++; + this_cpu_inc(nic->drv_stats->rx_l4_malformed); break; case CQ_RX_ERROP_L4_CHK: - stats->rx_l4_csum_errs++; + this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); break; case CQ_RX_ERROP_UDP_LEN: - stats->rx_udp_len_errs++; + this_cpu_inc(nic->drv_stats->rx_udp_len_errs); break; case CQ_RX_ERROP_L4_PORT: - stats->rx_l4_port_errs++; + this_cpu_inc(nic->drv_stats->rx_l4_port_errs); break; case CQ_RX_ERROP_TCP_FLAG: - stats->rx_tcp_flag_errs++; + this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); break; case CQ_RX_ERROP_TCP_OFFSET: - stats->rx_tcp_offset_errs++; + this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); break; case CQ_RX_ERROP_L4_PCLP: - stats->rx_l4_pclp++; + this_cpu_inc(nic->drv_stats->rx_l4_pclp); break; case CQ_RX_ERROP_RBDR_TRUNC: - stats->rx_truncated_pkts++; + this_cpu_inc(nic->drv_stats->rx_truncated_pkts); break; } @@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) } /* Check for errors in the send cmp.queue entry */ -int nicvf_check_cqe_tx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx) +int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) { - struct cmp_queue_stats *stats = &cq->stats; - switch (cqe_tx->send_status) { case CQ_TX_ERROP_GOOD: - stats->tx.good++; return 0; case CQ_TX_ERROP_DESC_FAULT: - stats->tx.desc_fault++; + this_cpu_inc(nic->drv_stats->tx_desc_fault); break; case CQ_TX_ERROP_HDR_CONS_ERR: - stats->tx.hdr_cons_err++; + this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); break; case CQ_TX_ERROP_SUBDC_ERR: - stats->tx.subdesc_err++; + this_cpu_inc(nic->drv_stats->tx_subdesc_err); + break; + case CQ_TX_ERROP_MAX_SIZE_VIOL: + this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); break; case CQ_TX_ERROP_IMM_SIZE_OFLOW: - stats->tx.imm_size_oflow++; + this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); break; case CQ_TX_ERROP_DATA_SEQUENCE_ERR: - stats->tx.data_seq_err++; + this_cpu_inc(nic->drv_stats->tx_data_seq_err); break; case CQ_TX_ERROP_MEM_SEQUENCE_ERR: - stats->tx.mem_seq_err++; + this_cpu_inc(nic->drv_stats->tx_mem_seq_err); break; case CQ_TX_ERROP_LOCK_VIOL: - stats->tx.lock_viol++; + this_cpu_inc(nic->drv_stats->tx_lock_viol); break; case CQ_TX_ERROP_DATA_FAULT: - stats->tx.data_fault++; + this_cpu_inc(nic->drv_stats->tx_data_fault); break; case CQ_TX_ERROP_TSTMP_CONFLICT: - stats->tx.tstmp_conflict++; + this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); break; case CQ_TX_ERROP_TSTMP_TIMEOUT: - stats->tx.tstmp_timeout++; + this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); break; case CQ_TX_ERROP_MEM_FAULT: - stats->tx.mem_fault++; + this_cpu_inc(nic->drv_stats->tx_mem_fault); break; case CQ_TX_ERROP_CK_OVERLAP: - stats->tx.csum_overlap++; + this_cpu_inc(nic->drv_stats->tx_csum_overlap); break; case 
CQ_TX_ERROP_CK_OFLOW: - stats->tx.csum_overflow++; + this_cpu_inc(nic->drv_stats->tx_csum_overflow); break; } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 869f3386028b..2e3c940c1093 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E { CQ_TX_ERROP_DESC_FAULT = 0x10, CQ_TX_ERROP_HDR_CONS_ERR = 0x11, CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13, CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, @@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E { CQ_TX_ERROP_ENUM_LAST = 0x8a, }; -struct cmp_queue_stats { - struct tx_stats { - u64 good; - u64 desc_fault; - u64 hdr_cons_err; - u64 subdesc_err; - u64 imm_size_oflow; - u64 data_seq_err; - u64 mem_seq_err; - u64 lock_viol; - u64 data_fault; - u64 tstmp_conflict; - u64 tstmp_timeout; - u64 mem_fault; - u64 csum_overlap; - u64 csum_overflow; - } tx; -} ____cacheline_aligned_in_smp; - enum RQ_SQ_STATS { RQ_SQ_STATS_OCTS, RQ_SQ_STATS_PKTS, @@ -241,7 +223,6 @@ struct cmp_queue { spinlock_t lock; /* lock to serialize processing CQEs */ void *desc; struct q_desc_mem dmem; - struct cmp_queue_stats stats; int irq; } ____cacheline_aligned_in_smp; @@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); -int nicvf_check_cqe_tx_errs(struct nicvf *nic, - struct cmp_queue *cq, struct cqe_send_t *cqe_tx); +int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx); #endif /* NICVF_QUEUES_H */ diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8bbaedbb7b94..050e21fbb147 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { - bgx->bgx_id = - (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; + bgx->bgx_id = (pci_resource_start(pdev, + PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; bgx->max_lmac = MAX_LMAC_PER_BGX; bgx_vnic[bgx->bgx_id] = bgx; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index d59c71e4a000..01cc7c859131 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h @@ -28,6 +28,8 @@ #define MAX_DMAC_PER_LMAC 8 #define MAX_FRAME_SIZE 9216 +#define BGX_ID_MASK 0x3 + #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 /* Registers */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1e74fd6085df..e19a0ca8e5dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); - napi_hash_del(&rq->napi); netif_napi_del(&rq->napi); rq->netdev = NULL; rq->cntxt_id = rq->abs_id = 0; diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 50812a1d67bd..ecf3ccc257bc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -168,6 +168,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ + CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/ /* T6 adapters: */ @@ -178,9 +179,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6005), CH_PCI_ID_TABLE_FENTRY(0x6006), CH_PCI_ID_TABLE_FENTRY(0x6007), + CH_PCI_ID_TABLE_FENTRY(0x6008), CH_PCI_ID_TABLE_FENTRY(0x6009), CH_PCI_ID_TABLE_FENTRY(0x600d), - CH_PCI_ID_TABLE_FENTRY(0x6010), CH_PCI_ID_TABLE_FENTRY(0x6011), CH_PCI_ID_TABLE_FENTRY(0x6014), CH_PCI_ID_TABLE_FENTRY(0x6015), diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index cece8a08edca..93aa2939142a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) if (eqo->q.created) { be_eq_clean(eqo); be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); - napi_hash_del(&eqo->napi); netif_napi_del(&eqo->napi); free_cpumask_var(eqo->affinity_mask); } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index c865135f3cb9..5ea740b4cf14 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -574,6 +574,8 @@ struct fec_enet_private { unsigned int reload_period; int pps_enable; unsigned int next_counter; + + u64 ethtool_stats[0]; }; void fec_ptp_init(struct platform_device *pdev); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5aa9d4ded214..5f77caa59534 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2313,14 +2313,24 @@ static const struct fec_stat { { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, }; -static void fec_enet_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) +static void fec_enet_update_ethtool_stats(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(fec_stats); i++) - data[i] = readl(fep->hwp + fec_stats[i].offset); + fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); +} + +static void fec_enet_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct fec_enet_private *fep = netdev_priv(dev); + + if (netif_running(dev)) + fec_enet_update_ethtool_stats(dev); + + memcpy(data, fep->ethtool_stats, ARRAY_SIZE(fec_stats) * sizeof(u64)); } static void fec_enet_get_strings(struct net_device *netdev, @@ -2874,6 +2884,8 @@ fec_enet_close(struct net_device *ndev) if (fep->quirks & FEC_QUIRK_ERR006687) imx6q_cpuidle_fec_irqs_unused(); + fec_enet_update_ethtool_stats(ndev); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@ -3180,6 +3192,8 @@ static int fec_enet_init(struct net_device *ndev) fec_restart(ndev); + fec_enet_update_ethtool_stats(ndev); + return 0; } @@ -3278,7 +3292,8 @@ fec_probe(struct platform_device *pdev) fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); /* Init network device */ - ndev = 
alloc_etherdev_mqs(sizeof(struct fec_enet_private), + ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + + ARRAY_SIZE(fec_stats) * sizeof(u64), num_tx_qs, num_rx_qs); if (!ndev) return -ENOMEM; @@ -3475,6 +3490,8 @@ failed_regulator: failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); failed_phy: of_node_put(phy_node); failed_ioremap: @@ -3488,6 +3505,7 @@ fec_drv_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + struct device_node *np = pdev->dev.of_node; cancel_work_sync(&fep->tx_timeout_work); fec_ptp_stop(pdev); @@ -3495,6 +3513,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 53ef51e3bd9e..71a5ded9d1de 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1107,6 +1107,9 @@ int memac_free(struct fman_mac *memac) { free_init_resources(memac); + if (memac->pcsphy) + put_device(&memac->pcsphy->mdio.dev); + kfree(memac->memac_drv_param); kfree(memac); diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index efabb04a1ae8..4b0f3a50b293 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -722,9 +722,6 @@ int tgec_free(struct fman_mac *tgec) { free_init_resources(tgec); - if (tgec->cfg) - tgec->cfg = NULL; - kfree(tgec->cfg); kfree(tgec); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 8fe6b3e253fa..736db9d9b0ad 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -892,6 +892,8 @@ static int mac_probe(struct platform_device *_of_dev) priv->fixed_link->duplex = phy->duplex; priv->fixed_link->pause = phy->pause; priv->fixed_link->asym_pause = phy->asym_pause; + + put_device(&phy->mdio.dev); } err = mac_dev->init(mac_dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index dc120c148d97..4b86260584a0 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -980,7 +980,7 @@ static int fs_enet_probe(struct platform_device *ofdev) err = clk_prepare_enable(clk); if (err) { ret = err; - goto out_free_fpi; + goto out_deregister_fixed_link; } fpi->clk_per = clk; } @@ -1061,6 +1061,9 @@ out_put: of_node_put(fpi->phy_node); if (fpi->clk_per) clk_disable_unprepare(fpi->clk_per); +out_deregister_fixed_link: + if (of_phy_is_fixed_link(ofdev->dev.of_node)) + of_phy_deregister_fixed_link(ofdev->dev.of_node); out_free_fpi: kfree(fpi); return ret; @@ -1079,6 +1082,8 @@ static int fs_enet_remove(struct platform_device *ofdev) of_node_put(fep->fpi->phy_node); if (fep->fpi->clk_per) clk_disable_unprepare(fep->fpi->clk_per); + if (of_phy_is_fixed_link(ofdev->dev.of_node)) + of_phy_deregister_fixed_link(ofdev->dev.of_node); free_netdev(ndev); return 0; } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4b4f5bc0e279..9061c2f82b9c 100644 
--- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1312,6 +1312,7 @@ static void gfar_init_addr_hash_table(struct gfar_private *priv) */ static int gfar_probe(struct platform_device *ofdev) { + struct device_node *np = ofdev->dev.of_node; struct net_device *dev = NULL; struct gfar_private *priv = NULL; int err = 0, i; @@ -1462,6 +1463,8 @@ static int gfar_probe(struct platform_device *ofdev) return 0; register_fail: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); unmap_group_regs(priv); gfar_free_rx_queues(priv); gfar_free_tx_queues(priv); @@ -1474,11 +1477,16 @@ register_fail: static int gfar_remove(struct platform_device *ofdev) { struct gfar_private *priv = platform_get_drvdata(ofdev); + struct device_node *np = ofdev->dev.of_node; of_node_put(priv->phy_node); of_node_put(priv->tbi_node); unregister_netdev(priv->ndev); + + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + unmap_group_regs(priv); gfar_free_rx_queues(priv); gfar_free_tx_queues(priv); diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 186ef8f16c80..f76d33279454 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3868,9 +3868,8 @@ static int ucc_geth_probe(struct platform_device* ofdev) dev = alloc_etherdev(sizeof(*ugeth)); if (dev == NULL) { - of_node_put(ug_info->tbi_node); - of_node_put(ug_info->phy_node); - return -ENOMEM; + err = -ENOMEM; + goto err_deregister_fixed_link; } ugeth = netdev_priv(dev); @@ -3907,10 +3906,7 @@ static int ucc_geth_probe(struct platform_device* ofdev) if (netif_msg_probe(ugeth)) pr_err("%s: Cannot register net device, aborting\n", dev->name); - free_netdev(dev); - of_node_put(ug_info->tbi_node); - of_node_put(ug_info->phy_node); - return err; + goto err_free_netdev; } mac_addr = of_get_mac_address(np); @@ -3923,16 +3919,29 @@ static int ucc_geth_probe(struct platform_device* ofdev) ugeth->node = np; return 0; + +err_free_netdev: + free_netdev(dev); +err_deregister_fixed_link: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(ug_info->tbi_node); + of_node_put(ug_info->phy_node); + + return err; } static int ucc_geth_remove(struct platform_device* ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct ucc_geth_private *ugeth = netdev_priv(dev); + struct device_node *np = ofdev->dev.of_node; unregister_netdev(dev); free_netdev(dev); ucc_geth_memclean(ugeth); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); of_node_put(ugeth->ug_info->tbi_node); of_node_put(ugeth->ug_info->phy_node); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index c54c6fac0d1d..b6ed818f78ff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, return ERR_PTR(-ENODEV); handle = dev->ops->get_handle(dev, port_id); - if (IS_ERR(handle)) + if (IS_ERR(handle)) { + put_device(&dev->cls_dev); return handle; + } handle->dev = dev; handle->owner_dev = owner_dev; @@ -356,6 +358,8 @@ out_when_init_queue: for (j = i - 1; j >= 0; j--) hnae_fini_queue(handle->qs[j]); + put_device(&dev->cls_dev); + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL(hnae_get_handle); @@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h) dev->ops->put_handle(h); module_put(dev->owner); + + put_device(&dev->cls_dev); } 
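Several hunks in this stretch (bcmgenet's bcmmii.c, fman's memac and mac.c, and hnae just above) plug the same class of leak: lookup helpers such as of_phy_find_device() and class_find_device() return their result with a device reference held, so every path that is finished with the object, including error paths, must drop it with put_device(). A minimal sketch of the pairing, with the link-down tweak standing in for real work:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int example_poke_phy(struct device_node *dn)
{
	struct phy_device *phydev;

	phydev = of_phy_find_device(dn);	/* takes a reference */
	if (!phydev)
		return -ENODEV;

	phydev->link = 0;			/* ... use the device ... */

	put_device(&phydev->mdio.dev);		/* drop it on every exit path */
	return 0;
}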
EXPORT_SYMBOL(hnae_put_handle); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 54efa9a5167b..bd719e25dd76 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev) netif_info(port, ifup, dev, "enabling port\n"); + netif_carrier_off(dev); + ret = ehea_up(dev); if (!ret) { port_napi_enable(port); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f44c5520fbc..0fbf686f5e7c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -74,7 +74,6 @@ #include #include #include -#include #include #include "ibmvnic.h" @@ -1505,9 +1504,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) adapter->max_rx_add_entries_per_subcrq > entries_page ? entries_page : adapter->max_rx_add_entries_per_subcrq; - /* Choosing the maximum number of queues supported by firmware*/ - adapter->req_tx_queues = adapter->max_tx_queues; - adapter->req_rx_queues = adapter->max_rx_queues; + adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; + adapter->req_rx_queues = adapter->opt_rx_comp_queues; adapter->req_rx_add_queues = adapter->max_rx_add_queues; adapter->req_mtu = adapter->max_mtu; @@ -3706,7 +3704,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) struct net_device *netdev; unsigned char *mac_addr_p; struct dentry *ent; - char buf[16]; /* debugfs name buf */ + char buf[17]; /* debugfs name buf */ int rc; dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", @@ -3845,6 +3843,9 @@ static int ibmvnic_remove(struct vio_dev *dev) if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) debugfs_remove_recursive(adapter->debugfs_dir); + dma_unmap_single(&dev->dev, adapter->stats_token, + sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); + if (adapter->ras_comps) dma_free_coherent(&dev->dev, adapter->ras_comp_num * diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index edc9a6ac5169..9affd7c198bd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4931,11 +4931,15 @@ static int igb_tso(struct igb_ring *tx_ring, /* initialize outer IP header fields */ if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + /* IP header will have to cancel out any data that * is not a part of the outer IP header */ - ip.v4->check = csum_fold(csum_add(lco_csum(skb), - csum_unfold(l4.tcp->check))); + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 12bb877df860..7dff7f6239cd 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1965,11 +1965,15 @@ static int igbvf_tso(struct igbvf_ring *tx_ring, /* initialize outer IP header fields */ if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + /* IP header will have to cancel out any data that * is not a part of the outer IP header */ - ip.v4->check = csum_fold(csum_add(lco_csum(skb), - csum_unfold(l4.tcp->check))); + ip.v4->check = csum_fold(csum_partial(trans_start, + 
csum_start - trans_start, + 0)); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bd93d823cc25..fee1f2918ead 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7277,11 +7277,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, /* initialize outer IP header fields */ if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + /* IP header will have to cancel out any data that * is not a part of the outer IP header */ - ip.v4->check = csum_fold(csum_add(lco_csum(skb), - csum_unfold(l4.tcp->check))); + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 7eaac3234049..cbf70fe4028a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3329,11 +3329,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, /* initialize outer IP header fields */ if (ip.v4->version == 4) { + unsigned char *csum_start = skb_checksum_start(skb); + unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + /* IP header will have to cancel out any data that * is not a part of the outer IP header */ - ip.v4->check = csum_fold(csum_add(lco_csum(skb), - csum_unfold(l4.tcp->check))); + ip.v4->check = csum_fold(csum_partial(trans_start, + csum_start - trans_start, + 0)); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bf5cc55ba24c..5b12022adf1f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) temp = (val & 0x003fff00) >> 8; temp *= 64000000; + temp += mp->t_clk / 2; do_div(temp, mp->t_clk); return (unsigned int)temp; @@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; temp *= 64000000; + temp += mp->t_clk / 2; do_div(temp, mp->t_clk); return (unsigned int)temp; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5cb07c2017bf..707bc4680b9b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4151,7 +4151,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; - dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; err = register_netdev(dev); @@ -4191,6 +4191,8 @@ err_clk: clk_disable_unprepare(pp->clk); err_put_phy_node: of_node_put(phy_node); + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); err_free_irq: irq_dispose_mapping(dev->irq); err_free_netdev: @@ -4202,6 +4204,7 @@ err_free_netdev: static int mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct device_node *dn = pdev->dev.of_node; struct mvneta_port *pp = netdev_priv(dev); 
unregister_netdev(dev); @@ -4209,6 +4212,8 @@ static int mvneta_remove(struct platform_device *pdev) clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); + if (of_phy_is_fixed_link(dn)) + of_phy_deregister_fixed_link(dn); irq_dispose_mapping(dev->irq); of_node_put(pp->phy_node); free_netdev(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 60227a3452a4..1026c452e39d 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3293,7 +3293,7 @@ static void mvpp2_cls_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); /* Clear classifier flow table */ - memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); + memset(&fe.data, 0, sizeof(fe.data)); for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { fe.index = index; mvpp2_cls_flow_write(priv, &fe); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f05ea56dcff2..941c8e2c944e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); static void sky2_shutdown(struct pci_dev *pdev) { + struct sky2_hw *hw = pci_get_drvdata(pdev); + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *ndev = hw->dev[port]; + + rtnl_lock(); + if (netif_running(ndev)) { + dev_close(ndev); + netif_device_detach(ndev); + } + rtnl_unlock(); + } sky2_suspend(&pdev->dev); pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); pci_set_power_state(pdev, PCI_D3hot); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4a62ffd7729d..86a89cbd3ec9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -318,6 +318,8 @@ static int mtk_phy_connect(struct net_device *dev) return 0; err_phy: + if (of_phy_is_fixed_link(mac->of_node)) + of_phy_deregister_fixed_link(mac->of_node); of_node_put(np); dev_err(eth->dev, "%s: invalid phy\n", __func__); return -EINVAL; @@ -1923,6 +1925,8 @@ static void mtk_uninit(struct net_device *dev) struct mtk_eth *eth = mac->hw; phy_disconnect(dev->phydev); + if (of_phy_is_fixed_link(mac->of_node)) + of_phy_deregister_fixed_link(mac->of_node); mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0); mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 12c99a2655f2..fb8bb027b69c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -129,6 +129,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) } }; +/* Must not acquire state_lock, as its corresponding work_sync + * is done under it. 
+ */ static void mlx4_en_filter_work(struct work_struct *work) { struct mlx4_en_filter *filter = container_of(work, @@ -2076,13 +2079,6 @@ err: return -ENOMEM; } -static void mlx4_en_shutdown(struct net_device *dev) -{ - rtnl_lock(); - netif_device_detach(dev); - mlx4_en_close(dev); - rtnl_unlock(); -} static int mlx4_en_copy_priv(struct mlx4_en_priv *dst, struct mlx4_en_priv *src, @@ -2159,8 +2155,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; - bool shutdown = mdev->dev->persist->interface_state & - MLX4_INTERFACE_STATE_SHUTDOWN; en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); @@ -2168,10 +2162,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) if (priv->registered) { devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, priv->port)); - if (shutdown) - mlx4_en_shutdown(dev); - else - unregister_netdev(dev); + unregister_netdev(dev); } if (priv->allocated) @@ -2189,20 +2180,18 @@ void mlx4_en_destroy_netdev(struct net_device *dev) mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; mdev->upper[priv->port] = NULL; - mutex_unlock(&mdev->state_lock); #ifdef CONFIG_RFS_ACCEL mlx4_en_cleanup_filters(priv); #endif mlx4_en_free_resources(priv); + mutex_unlock(&mdev->state_lock); kfree(priv->tx_ring); kfree(priv->tx_cq); - if (!shutdown) - free_netdev(dev); - dev->ethtool_ops = NULL; + free_netdev(dev); } static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6f4e67bc3538..75d07fa9d0b1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4147,11 +4147,8 @@ static void mlx4_shutdown(struct pci_dev *pdev) mlx4_info(persist->dev, "mlx4_shutdown was called\n"); mutex_lock(&persist->interface_state_mutex); - if (persist->interface_state & MLX4_INTERFACE_STATE_UP) { - /* Notify mlx4 clients that the kernel is being shut down */ - persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN; + if (persist->interface_state & MLX4_INTERFACE_STATE_UP) mlx4_unload_one(pdev); - } mutex_unlock(&persist->interface_state_mutex); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 94b891c118c1..1a670b681555 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1457,7 +1457,12 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach); int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn, enum mlx4_net_trans_promisc_mode mode) { - struct mlx4_net_trans_rule rule; + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + }; + u64 *regid_p; switch (mode) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f4c687ce4c59..84e8b250e2af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = priv->params.num_tc; + c->xdp = !!priv->xdp_prog; if (priv->params.rx_am_enabled) rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); @@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto 
err_close_tx_cqs; + /* XDP SQ CQ params are same as normal TXQ sq CQ params */ + err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, + priv->params.tx_cq_moderation) : 0; + if (err) + goto err_close_rx_cq; + napi_enable(&c->napi); err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); @@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, } } - if (priv->xdp_prog) { - /* XDP SQ CQ params are same as normal TXQ sq CQ params */ - err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, - priv->params.tx_cq_moderation); - if (err) - goto err_close_sqs; + err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; + if (err) + goto err_close_sqs; - err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq); - if (err) { - mlx5e_close_cq(&c->xdp_sq.cq); - goto err_close_sqs; - } - } - - c->xdp = !!priv->xdp_prog; err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_xdp_sq; @@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, return 0; err_close_xdp_sq: - mlx5e_close_sq(&c->xdp_sq); + if (c->xdp) + mlx5e_close_sq(&c->xdp_sq); err_close_sqs: mlx5e_close_sqs(c); @@ -1522,6 +1519,10 @@ err_close_icosq: err_disable_napi: napi_disable(&c->napi); + if (c->xdp) + mlx5e_close_cq(&c->xdp_sq.cq); + +err_close_rx_cq: mlx5e_close_cq(&c->rq.cq); err_close_tx_cqs: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7fe6559e4ab3..bf1c09ca73c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; #endif - netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce8c54d18906..6bb21b31cfeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_VLAN, f->mask); - if (mask->vlan_id) { + if (mask->vlan_id || mask->vlan_priority) { MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c55ad8d00c05..d239f5d0ea36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (esw->mode != SRIOV_OFFLOADS) return ERR_PTR(-EOPNOTSUPP); - action = attr->action; + /* per flow vlan pop/push is emulated, don't set that into the firmware */ + action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); if (action & 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 89696048b045..914e5466f729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering) { steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); - if (IS_ERR_OR_NULL(steering->root_ns)) + if (!steering->root_ns) goto cleanup; if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d5433c49b2b0..3b7c6a9f2b5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #ifdef CONFIG_RFS_ACCEL #include @@ -1226,6 +1225,9 @@ static int init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); + dev->pdev = pdev; + dev->event = mlx5_core_event; + if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { mlx5_core_warn(dev, "selected profile out of range, selecting default (%d)\n", @@ -1233,8 +1235,6 @@ static int init_one(struct pci_dev *pdev, prof_sel = MLX5_DEFAULT_PROF; } dev->profile = &profile[prof_sel]; - dev->pdev = pdev; - dev->event = mlx5_core_event; INIT_LIST_HEAD(&priv->ctx_list); spin_lock_init(&priv->ctx_lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1ec0a4ce3c46..dda5761e91bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) span_entry->used = true; span_entry->id = index; - span_entry->ref_count = 0; + span_entry->ref_count = 1; span_entry->local_port = local_port; return span_entry; } @@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry span_entry = mlxsw_sp_span_entry_find(port); if (span_entry) { + /* Already exists, just take a reference */ span_entry->ref_count++; return span_entry; } @@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_span_entry *span_entry) { + WARN_ON(!span_entry->ref_count); if (--span_entry->ref_count == 0) mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); return 0; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9b22863a924b..97bbc1d21df8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -115,7 +115,7 @@ struct mlxsw_sp_rif { struct mlxsw_sp_mid { struct list_head list; unsigned char addr[ETH_ALEN]; - u16 vid; + u16 fid; u16 mid; unsigned int ref_count; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4573da2c5560..e83072da6272 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) return 0; } +static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); + static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) { + mlxsw_sp_router_fib_flush(mlxsw_sp); kfree(mlxsw_sp->router.vrs); } struct mlxsw_sp_neigh_key { - 
unsigned char addr[sizeof(struct in6_addr)]; - struct net_device *dev; + struct neighbour *n; }; struct mlxsw_sp_neigh_entry { struct rhash_head ht_node; struct mlxsw_sp_neigh_key key; u16 rif; - struct neighbour *n; bool offloaded; struct delayed_work dw; struct mlxsw_sp_port *mlxsw_sp_port; @@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, - struct net_device *dev, u16 rif, - struct neighbour *n) +mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) { struct mlxsw_sp_neigh_entry *neigh_entry; neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); if (!neigh_entry) return NULL; - memcpy(neigh_entry->key.addr, addr, addr_len); - neigh_entry->key.dev = dev; + neigh_entry->key.n = n; neigh_entry->rif = rif; - neigh_entry->n = n; INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); INIT_LIST_HEAD(&neigh_entry->nexthop_list); return neigh_entry; @@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) } static struct mlxsw_sp_neigh_entry * -mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, - size_t addr_len, struct net_device *dev) +mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) { - struct mlxsw_sp_neigh_key key = {{ 0 } }; + struct mlxsw_sp_neigh_key key; - memcpy(key.addr, addr, addr_len); - key.dev = dev; + key.n = n; return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, &key, mlxsw_sp_neigh_ht_params); } @@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_neigh_entry *neigh_entry; struct mlxsw_sp_rif *r; - u32 dip; int err; if (n->tbl != &arp_tbl) return 0; - dip = ntohl(*((__be32 *) n->primary_key)); - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), - n->dev); - if (neigh_entry) { - WARN_ON(neigh_entry->n != n); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); + if (neigh_entry) return 0; - } r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); if (WARN_ON(!r)) return -EINVAL; - neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, - r->rif, n); + neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); if (!neigh_entry) return -ENOMEM; err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); @@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_neigh_entry *neigh_entry; - u32 dip; if (n->tbl != &arp_tbl) return; - dip = ntohl(*((__be32 *) n->primary_key)); - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), - n->dev); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); if (!neigh_entry) return; mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); @@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, } } +static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) +{ + u8 num_rec, last_rec_index, num_entries; + + num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); + last_rec_index = num_rec - 1; + + if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) + return false; + if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == + MLXSW_REG_RAUHTD_TYPE_IPV6) + return true; + + num_entries = 
mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, + last_rec_index); + if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) + return true; + return false; +} + static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) { char *rauhtd_pl; @@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < num_rec; i++) mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, i); - } while (num_rec); + } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); rtnl_unlock(); kfree(rauhtd_pl); @@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) * is active regardless of the traffic. */ if (!list_empty(&neigh_entry->nexthop_list)) - neigh_event_send(neigh_entry->n, NULL); + neigh_event_send(neigh_entry->key.n, NULL); } rtnl_unlock(); } @@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) rtnl_lock(); list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, nexthop_neighs_list_node) { - if (!(neigh_entry->n->nud_state & NUD_VALID) && + if (!(neigh_entry->key.n->nud_state & NUD_VALID) && !list_empty(&neigh_entry->nexthop_list)) - neigh_event_send(neigh_entry->n, NULL); + neigh_event_send(neigh_entry->key.n, NULL); } rtnl_unlock(); @@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) { struct mlxsw_sp_neigh_entry *neigh_entry = container_of(work, struct mlxsw_sp_neigh_entry, dw.work); - struct neighbour *n = neigh_entry->n; + struct neighbour *n = neigh_entry->key.n; struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char rauht_pl[MLXSW_REG_RAUHT_LEN]; @@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, mlxsw_sp = mlxsw_sp_port->mlxsw_sp; dip = ntohl(*((__be32 *) n->primary_key)); - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, - &dip, - sizeof(__be32), - dev); - if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); + if (WARN_ON(!neigh_entry)) { mlxsw_sp_port_dev_put(mlxsw_sp_port); return NOTIFY_DONE; } @@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, struct fib_nh *fib_nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - u32 gwip = ntohl(fib_nh->nh_gw); struct net_device *dev = fib_nh->nh_dev; struct neighbour *n; u8 nud_state; - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, - sizeof(gwip), dev); - if (!neigh_entry) { - __be32 gwipn = htonl(gwip); - - n = neigh_create(&arp_tbl, &gwipn, dev); + /* Take a reference of neigh here ensuring that neigh would + * not be detructed before the nexthop entry is finished. + * The reference is taken either in neigh_lookup() or + * in neith_create() in case n is not found. + */ + n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); + if (!n) { + n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); - neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, - sizeof(gwip), dev); - if (!neigh_entry) { - neigh_release(n); - return -EINVAL; - } - } else { - /* Take a reference of neigh here ensuring that neigh would - * not be detructed before the nexthop entry is finished. 
- * The second branch takes the reference in neith_create() - */ - n = neigh_entry->n; - neigh_clone(n); + } + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); + if (!neigh_entry) { + neigh_release(n); + return -EINVAL; } /* If that is the first nexthop connected to that neigh, add to @@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, if (list_empty(&nh->neigh_entry->nexthop_list)) list_del(&nh->neigh_entry->nexthop_neighs_list_node); - neigh_release(neigh_entry->n); + neigh_release(neigh_entry->key.n); } static struct mlxsw_sp_nexthop_group * @@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, for (i = 0; i < fi->fib_nhs; i++) { struct fib_nh *fib_nh = &fi->fib_nh[i]; - u32 gwip = ntohl(fib_nh->nh_gw); + struct neighbour *n = nh->neigh_entry->key.n; - if (memcmp(nh->neigh_entry->key.addr, - &gwip, sizeof(u32)) == 0 && - nh->neigh_entry->key.dev == fib_nh->nh_dev) + if (memcmp(n->primary_key, &fib_nh->nh_gw, + sizeof(fib_nh->nh_gw)) == 0 && + n->dev == fib_nh->nh_dev) return true; } return false; @@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_resources *resources; struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_entry *tmp; struct mlxsw_sp_vr *vr; int i; - int err; resources = mlxsw_core_resources_get(mlxsw_sp->core); for (i = 0; i < resources->max_virtual_routers; i++) { vr = &mlxsw_sp->router.vrs[i]; + if (!vr->used) continue; @@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) break; } } +} + +static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + mlxsw_sp_router_fib_flush(mlxsw_sp); mlxsw_sp->router.aborted = true; err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); if (err) @@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_entry_notifier_info *fen_info = ptr; int err; + if (!net_eq(fen_info->info.net, &init_net)) + return NOTIFY_DONE; + switch (event) { case FIB_EVENT_ENTRY_ADD: err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 5e00c79e8133..1e2c8eca3af1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, const unsigned char *addr, - u16 vid) + u16 fid) { struct mlxsw_sp_mid *mid; list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { - if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) + if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) return mid; } return NULL; @@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, const unsigned char *addr, - u16 vid) + u16 fid) { struct mlxsw_sp_mid *mid; u16 mid_idx; @@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, set_bit(mid_idx, mlxsw_sp->br_mids.mapped); ether_addr_copy(mid->addr, addr); - mid->vid = vid; + mid->fid = fid; mid->mid = 
mid_idx; mid->ref_count = 0; list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); @@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, if (switchdev_trans_ph_prepare(trans)) return 0; - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); if (!mid) { - mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); if (!mid) { netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; @@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid_idx; int err = 0; - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); + mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); if (!mid) { netdev_err(dev, "Unable to remove port from MC DB\n"); return -EINVAL; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 72eee29c677f..2777d5bb4380 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -727,9 +727,6 @@ struct core_tx_bd_flags { #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 -#define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 -#define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 - }; struct core_tx_bd { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 63e1a1b0ef8e..f95385cbbd40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << CORE_TX_BD_FLAGS_START_BD_SHIFT; SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); + SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); DMA_REGPAIR_LE(start_bd->addr, first_frag); start_bd->nbytes = cpu_to_le16(first_frag_len); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c418360ba02a..333c7442e48a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev, { int i; + if (IS_ENABLED(CONFIG_QED_RDMA)) { + params->rdma_pf_params.num_qps = QED_ROCE_QPS; + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + /* divide by 3 the MRs to avoid MF ILT overflow */ + params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; + } + for (i = 0; i < cdev->num_hwfns; i++) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; p_hwfn->pf_params = *params; } - - if (!IS_ENABLED(CONFIG_QED_RDMA)) - return; - - params->rdma_pf_params.num_qps = QED_ROCE_QPS; - params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; - /* divide by 3 the MRs to avoid MF ILT overflow */ - params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; - params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; } static int qed_slowpath_start(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 12251a1032d1..7567cc464b88 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { int tc; 
- for (j = 0; j < QEDE_NUM_RQSTATS; j++) - sprintf(buf + (k + j) * ETH_GSTRING_LEN, - "%d: %s", i, qede_rqstats_arr[j].string); - k += QEDE_NUM_RQSTATS; - for (tc = 0; tc < edev->num_tc; tc++) { - for (j = 0; j < QEDE_NUM_TQSTATS; j++) + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + for (j = 0; j < QEDE_NUM_RQSTATS; j++) sprintf(buf + (k + j) * ETH_GSTRING_LEN, - "%d.%d: %s", i, tc, - qede_tqstats_arr[j].string); - k += QEDE_NUM_TQSTATS; + "%d: %s", i, + qede_rqstats_arr[j].string); + k += QEDE_NUM_RQSTATS; + } + + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + for (tc = 0; tc < edev->num_tc; tc++) { + for (j = 0; j < QEDE_NUM_TQSTATS; j++) + sprintf(buf + (k + j) * + ETH_GSTRING_LEN, + "%d.%d: %s", i, tc, + qede_tqstats_arr[j].string); + k += QEDE_NUM_TQSTATS; + } } } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7def29aaf65c..85f46dbecd5b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) } mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, - rxq->rx_buf_size, DMA_FROM_DEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "Failed to map TPA replacement buffer\n"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 6fb3bee904d3..0b4deb31e742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt) mac |= TXEN | RXEN; /* enable RX/TX */ - /* We don't have ethtool support yet, so force flow-control mode - * to 'full' always. - */ - mac |= TXFC | RXFC; + /* Configure MAC flow control to match the PHY's settings. */ + if (phydev->pause) + mac |= RXFC; + if (phydev->pause != phydev->asym_pause) + mac |= TXFC; /* setup link speed */ mac &= ~SPEED_MASK; @@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt) writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); + /* Enable pause frames. Without this feature, the EMAC has been shown + * to receive (and drop) frames with FCS errors at gigabit connections. 
+ */ + adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; phy_start(adpt->phydev); diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index da4e90db4d98..99a14df28b96 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c @@ -212,6 +212,7 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) phy_np = of_parse_phandle(np, "phy-handle", 0); adpt->phydev = of_phy_find_device(phy_np); + of_node_put(phy_np); } if (!adpt->phydev) { diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 75c1b530e39e..72fe343c7a36 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = { /* CDR Settings */ {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, - {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, + {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, /* TX/RX Settings */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 4fede4b86538..57b35aeac51a 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -711,6 +711,8 @@ static int emac_probe(struct platform_device *pdev) err_undo_napi: netif_napi_del(&adpt->rx_q.napi); err_undo_mdiobus: + if (!has_acpi_companion(&pdev->dev)) + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); err_undo_clocks: emac_clks_teardown(adpt); @@ -730,6 +732,8 @@ static int emac_remove(struct platform_device *pdev) emac_clks_teardown(adpt); + if (!has_acpi_companion(&pdev->dev)) + put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); free_netdev(netdev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 630536bc72f9..d6a217874a8b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1008,20 +1008,18 @@ static int ravb_phy_init(struct net_device *ndev) of_node_put(pn); if (!phydev) { netdev_err(ndev, "failed to connect PHY\n"); - return -ENOENT; + err = -ENOENT; + goto err_deregister_fixed_link; } /* This driver only support 10/100Mbit speeds on Gen3 * at this time. 
*/ if (priv->chip_id == RCAR_GEN3) { - int err; - err = phy_set_max_speed(phydev, SPEED_100); if (err) { netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); - phy_disconnect(phydev); - return err; + goto err_phy_disconnect; } netdev_info(ndev, "limited PHY to 100Mbit/s\n"); @@ -1033,6 +1031,14 @@ static int ravb_phy_init(struct net_device *ndev) phy_attached_info(phydev); return 0; + +err_phy_disconnect: + phy_disconnect(phydev); +err_deregister_fixed_link: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + + return err; } /* PHY control start function */ @@ -1634,6 +1640,7 @@ static void ravb_set_rx_mode(struct net_device *ndev) /* Device close function for Ethernet AVB */ static int ravb_close(struct net_device *ndev) { + struct device_node *np = ndev->dev.parent->of_node; struct ravb_private *priv = netdev_priv(ndev); struct ravb_tstamp_skb *ts_skb, *ts_skb2; @@ -1663,6 +1670,8 @@ static int ravb_close(struct net_device *ndev) if (ndev->phydev) { phy_stop(ndev->phydev); phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); } if (priv->chip_id != RCAR_GEN2) { diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 05b0dc55de77..1a92de705199 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -518,7 +518,7 @@ static struct sh_eth_cpu_data r7s72100_data = { .ecsr_value = ECSR_ICD, .ecsipr_value = ECSIPR_ICDIP, - .eesipr_value = 0xff7f009f, + .eesipr_value = 0xe77f009f, .tx_check = EESR_TC1 | EESR_FTC, .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3cf3557106c2..6b89e4a7b164 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel) *channel = *old_channel; channel->napi_dev = NULL; + INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); + channel->napi_str.napi_id = 0; + channel->napi_str.state = 0; memset(&channel->eventq, 0, sizeof(channel->eventq)); for (j = 0; j < EFX_TXQ_TYPES; j++) { diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 3818c5e06eba..4b78168a5f3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -107,7 +107,7 @@ config DWMAC_STI config DWMAC_STM32 tristate "STM32 DWMAC support" default ARCH_STM32 - depends on OF && HAS_IOMEM + depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST) select MFD_SYSCON ---help--- Support for ethernet controller on STM32 SOCs. 
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index 2920e2ee3864..489ef146201e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -63,8 +63,8 @@ #define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 #define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 #define TSE_PCS_SW_RESET_TIMEOUT 100 -#define TSE_PCS_USE_SGMII_AN_MASK BIT(2) -#define TSE_PCS_USE_SGMII_ENA BIT(1) +#define TSE_PCS_USE_SGMII_AN_MASK BIT(1) +#define TSE_PCS_USE_SGMII_ENA BIT(0) #define SGMII_ADAPTER_CTRL_REG 0x00 #define SGMII_ADAPTER_DISABLE 0x0001 diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index d3292c4a6eda..6d2de4e01f6d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -120,14 +120,17 @@ struct stmmac_extra_stats { unsigned long ip_csum_bypassed; unsigned long ipv4_pkt_rcvd; unsigned long ipv6_pkt_rcvd; - unsigned long rx_msg_type_ext_no_ptp; - unsigned long rx_msg_type_sync; - unsigned long rx_msg_type_follow_up; - unsigned long rx_msg_type_delay_req; - unsigned long rx_msg_type_delay_resp; - unsigned long rx_msg_type_pdelay_req; - unsigned long rx_msg_type_pdelay_resp; - unsigned long rx_msg_type_pdelay_follow_up; + unsigned long no_ptp_rx_msg_type_ext; + unsigned long ptp_rx_msg_type_sync; + unsigned long ptp_rx_msg_type_follow_up; + unsigned long ptp_rx_msg_type_delay_req; + unsigned long ptp_rx_msg_type_delay_resp; + unsigned long ptp_rx_msg_type_pdelay_req; + unsigned long ptp_rx_msg_type_pdelay_resp; + unsigned long ptp_rx_msg_type_pdelay_follow_up; + unsigned long ptp_rx_msg_type_announce; + unsigned long ptp_rx_msg_type_management; + unsigned long ptp_rx_msg_pkt_reserved_type; unsigned long ptp_frame_type; unsigned long ptp_ver; unsigned long timestamp_dropped; @@ -482,11 +485,12 @@ struct stmmac_ops { /* PTP and HW Timer helpers */ struct stmmac_hwtimestamp { void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); - u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); + u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, + int gmac4); int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); int (*config_addend) (void __iomem *ioaddr, u32 addend); int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub); + int add_sub, int gmac4); u64(*get_systime) (void __iomem *ioaddr); }; diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 2e4c171a2b41..e3c86d422109 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -155,14 +155,18 @@ #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) /* Extended RDES4 message type definitions */ -#define RDES_EXT_NO_PTP 0 -#define RDES_EXT_SYNC 1 -#define RDES_EXT_FOLLOW_UP 2 -#define RDES_EXT_DELAY_REQ 3 -#define RDES_EXT_DELAY_RESP 4 -#define RDES_EXT_PDELAY_REQ 5 -#define RDES_EXT_PDELAY_RESP 6 -#define RDES_EXT_PDELAY_FOLLOW_UP 7 +#define RDES_EXT_NO_PTP 0x0 +#define RDES_EXT_SYNC 0x1 +#define RDES_EXT_FOLLOW_UP 0x2 +#define RDES_EXT_DELAY_REQ 0x3 +#define RDES_EXT_DELAY_RESP 0x4 +#define RDES_EXT_PDELAY_REQ 0x5 +#define RDES_EXT_PDELAY_RESP 0x6 +#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 +#define RDES_PTP_ANNOUNCE 0x8 +#define RDES_PTP_MANAGEMENT 0x9 +#define RDES_PTP_SIGNALING 0xa +#define RDES_PTP_PKT_RESERVED_TYPE 0xf /* Basic descriptor structure for 
normal and alternate descriptors */ struct dma_desc { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c index b1e5f24708c9..e6e6c2fcc4b7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -50,10 +50,23 @@ static int dwmac_generic_probe(struct platform_device *pdev) if (plat_dat->init) { ret = plat_dat->init(pdev, plat_dat->bsp_priv); if (ret) - return ret; + goto err_remove_config_dt; } - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_exit; + + return 0; + +err_exit: + if (plat_dat->exit) + plat_dat->exit(pdev, plat_dat->bsp_priv); +err_remove_config_dt: + if (pdev->dev.of_node) + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static const struct of_device_id dwmac_generic_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 36d3355f2fb0..866444b6c82f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -271,15 +271,17 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) - return -ENOMEM; + if (!gmac) { + err = -ENOMEM; + goto err_remove_config_dt; + } gmac->pdev = pdev; err = ipq806x_gmac_of_parse(gmac); if (err) { dev_err(dev, "device tree parsing error\n"); - return err; + goto err_remove_config_dt; } regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL, @@ -300,7 +302,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return -EINVAL; + err = -EINVAL; + goto err_remove_config_dt; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -319,7 +322,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", phy_modes(gmac->phy_mode)); - return -EINVAL; + err = -EINVAL; + goto err_remove_config_dt; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -346,7 +350,16 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (err) + goto err_remove_config_dt; + + return 0; + +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return err; } static const struct of_device_id ipq806x_gmac_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c index 78e9d1861896..3d3f43d91b98 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c @@ -46,7 +46,8 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg"); if (IS_ERR(reg)) { dev_err(&pdev->dev, "syscon lookup failed\n"); - return PTR_ERR(reg); + ret = PTR_ERR(reg); + goto err_remove_config_dt; } if (plat_dat->interface == PHY_INTERFACE_MODE_MII) { @@ -55,13 +56,23 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev) ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII; } else { dev_err(&pdev->dev, "Only 
MII and RMII mode supported\n"); - return -EINVAL; + ret = -EINVAL; + goto err_remove_config_dt; } regmap_update_bits(reg, LPC18XX_CREG_CREG6, LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode); - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_remove_config_dt; + + return 0; + +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static const struct of_device_id lpc18xx_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index 309d99536a2c..7fdd1760a74c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -64,18 +64,31 @@ static int meson6_dwmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return -ENOMEM; + if (!dwmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->reg = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dwmac->reg)) - return PTR_ERR(dwmac->reg); + if (IS_ERR(dwmac->reg)) { + ret = PTR_ERR(dwmac->reg); + goto err_remove_config_dt; + } plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_remove_config_dt; + + return 0; + +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static const struct of_device_id meson6_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 250e4ceafc8d..ffaed1f35efe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -264,32 +264,48 @@ static int meson8b_dwmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return -ENOMEM; + if (!dwmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } res = platform_get_resource(pdev, IORESOURCE_MEM, 1); dwmac->regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dwmac->regs)) - return PTR_ERR(dwmac->regs); + if (IS_ERR(dwmac->regs)) { + ret = PTR_ERR(dwmac->regs); + goto err_remove_config_dt; + } dwmac->pdev = pdev; dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); if (dwmac->phy_mode < 0) { dev_err(&pdev->dev, "missing phy-mode property\n"); - return -EINVAL; + ret = -EINVAL; + goto err_remove_config_dt; } ret = meson8b_init_clk(dwmac); if (ret) - return ret; + goto err_remove_config_dt; ret = meson8b_init_prg_eth(dwmac); if (ret) - return ret; + goto err_remove_config_dt; plat_dat->bsp_priv = dwmac; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_clk_disable; + + return 0; + +err_clk_disable: + clk_disable_unprepare(dwmac->m25_div_clk); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static int meson8b_dwmac_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 3740a4417fa0..d80c88bd2bba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c 
@@ -981,14 +981,27 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->resume = rk_gmac_resume; plat_dat->bsp_priv = rk_gmac_setup(pdev, data); - if (IS_ERR(plat_dat->bsp_priv)) - return PTR_ERR(plat_dat->bsp_priv); + if (IS_ERR(plat_dat->bsp_priv)) { + ret = PTR_ERR(plat_dat->bsp_priv); + goto err_remove_config_dt; + } ret = rk_gmac_init(pdev, plat_dat->bsp_priv); if (ret) - return ret; + goto err_remove_config_dt; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_gmac_exit; + + return 0; + +err_gmac_exit: + rk_gmac_exit(pdev, plat_dat->bsp_priv); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static const struct of_device_id rk_gmac_dwmac_match[] = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index bec6963ac71e..0c420e97de1e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -304,6 +304,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; int ret; struct socfpga_dwmac *dwmac; + struct net_device *ndev; + struct stmmac_priv *stpriv; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -314,32 +316,43 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return -ENOMEM; + if (!dwmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } ret = socfpga_dwmac_parse_data(dwmac, dev); if (ret) { dev_err(dev, "Unable to parse OF data\n"); - return ret; + goto err_remove_config_dt; } plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_remove_config_dt; - if (!ret) { - struct net_device *ndev = platform_get_drvdata(pdev); - struct stmmac_priv *stpriv = netdev_priv(ndev); + ndev = platform_get_drvdata(pdev); + stpriv = netdev_priv(ndev); - /* The socfpga driver needs to control the stmmac reset to - * set the phy mode. Create a copy of the core reset handel - * so it can be used by the driver later. - */ - dwmac->stmmac_rst = stpriv->stmmac_rst; + /* The socfpga driver needs to control the stmmac reset to set the phy + * mode. Create a copy of the core reset handle so it can be used by + * the driver later. 
+ */ + dwmac->stmmac_rst = stpriv->stmmac_rst; - ret = socfpga_dwmac_set_phy_mode(dwmac); - } + ret = socfpga_dwmac_set_phy_mode(dwmac); + if (ret) + goto err_dvr_remove; + + return 0; + +err_dvr_remove: + stmmac_dvr_remove(&pdev->dev); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 58c05acc2aab..060b98c37a85 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c @@ -345,13 +345,15 @@ static int sti_dwmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return -ENOMEM; + if (!dwmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } ret = sti_dwmac_parse_data(dwmac, pdev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - return ret; + goto err_remove_config_dt; } dwmac->fix_retime_src = data->fix_retime_src; @@ -363,9 +365,20 @@ static int sti_dwmac_probe(struct platform_device *pdev) ret = sti_dwmac_init(pdev, plat_dat->bsp_priv); if (ret) - return ret; + goto err_remove_config_dt; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + goto err_dwmac_exit; + + return 0; + +err_dwmac_exit: + sti_dwmac_exit(pdev, plat_dat->bsp_priv); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); + + return ret; } static const struct sti_dwmac_of_data stih4xx_dwmac_data = { diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index e5a926b8bee7..61cb24810d10 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -107,24 +107,33 @@ static int stm32_dwmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); - if (!dwmac) - return -ENOMEM; + if (!dwmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } ret = stm32_dwmac_parse_data(dwmac, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Unable to parse OF data\n"); - return ret; + goto err_remove_config_dt; } plat_dat->bsp_priv = dwmac; ret = stm32_dwmac_init(plat_dat); if (ret) - return ret; + goto err_remove_config_dt; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - stm32_dwmac_clk_disable(dwmac); + goto err_clk_disable; + + return 0; + +err_clk_disable: + stm32_dwmac_clk_disable(dwmac); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index adff46375a32..d07520fb969e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -120,22 +120,27 @@ static int sun7i_gmac_probe(struct platform_device *pdev) return PTR_ERR(plat_dat); gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL); - if (!gmac) - return -ENOMEM; + if (!gmac) { + ret = -ENOMEM; + goto err_remove_config_dt; + } gmac->interface = of_get_phy_mode(dev->of_node); gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx"); if (IS_ERR(gmac->tx_clk)) { dev_err(dev, "could not get tx clock\n"); - return PTR_ERR(gmac->tx_clk); + ret = PTR_ERR(gmac->tx_clk); + goto err_remove_config_dt; } /* Optional regulator for PHY */ gmac->regulator = devm_regulator_get_optional(dev, "phy"); if 
(IS_ERR(gmac->regulator)) { - if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto err_remove_config_dt; + } dev_info(dev, "no regulator found\n"); gmac->regulator = NULL; } @@ -151,11 +156,18 @@ static int sun7i_gmac_probe(struct platform_device *pdev) ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv); if (ret) - return ret; + goto err_remove_config_dt; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - sun7i_gmac_exit(pdev, plat_dat->bsp_priv); + goto err_gmac_exit; + + return 0; + +err_gmac_exit: + sun7i_gmac_exit(pdev, plat_dat->bsp_priv); +err_remove_config_dt: + stmmac_remove_config_dt(pdev, plat_dat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index a1b17cd7886b..a601f8d43b75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, x->ipv4_pkt_rcvd++; if (rdes1 & RDES1_IPV6_HEADER) x->ipv6_pkt_rcvd++; - if (message_type == RDES_EXT_SYNC) - x->rx_msg_type_sync++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; else if (message_type == RDES_EXT_FOLLOW_UP) - x->rx_msg_type_follow_up++; + x->ptp_rx_msg_type_follow_up++; else if (message_type == RDES_EXT_DELAY_REQ) - x->rx_msg_type_delay_req++; + x->ptp_rx_msg_type_delay_req++; else if (message_type == RDES_EXT_DELAY_RESP) - x->rx_msg_type_delay_resp++; + x->ptp_rx_msg_type_delay_resp++; else if (message_type == RDES_EXT_PDELAY_REQ) - x->rx_msg_type_pdelay_req++; + x->ptp_rx_msg_type_pdelay_req++; else if (message_type == RDES_EXT_PDELAY_RESP) - x->rx_msg_type_pdelay_resp++; + x->ptp_rx_msg_type_pdelay_resp++; else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) - x->rx_msg_type_pdelay_follow_up++; - else - x->rx_msg_type_ext_no_ptp++; + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; if (rdes1 & RDES1_PTP_PACKET_TYPE) x->ptp_frame_type++; @@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) { - return (p->des3 & TDES3_TIMESTAMP_STATUS) - >> TDES3_TIMESTAMP_STATUS_SHIFT; + /* Context type from W/B descriptor must be zero */ + if (p->des3 & TDES3_CONTEXT_TYPE) + return -EINVAL; + + /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ + if (p->des3 & TDES3_TIMESTAMP_STATUS) + return 0; + + return 1; } -/* NOTE: For RX CTX bit has to be checked before - * HAVE a specific function for TX and another one for RX - */ -static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) +static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) { struct dma_desc *p = (struct dma_desc *)desc; u64 ns; @@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) return ns; } -static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) +static int dwmac4_rx_check_timestamp(void *desc) { struct dma_desc *p = (struct dma_desc *)desc; + u32 own, ctxt; + int ret = 1; - return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) - >> 
RDES1_TIMESTAMP_AVAILABLE_SHIFT; + own = p->des3 & RDES3_OWN; + ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) + >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); + + if (likely(!own && ctxt)) { + if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) + /* Corrupted value */ + ret = -EINVAL; + else + /* A valid Timestamp is ready to be read */ + ret = 0; + } + + /* Timestamp not ready */ + return ret; +} + +static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + int ret = -EINVAL; + + /* Get the status from normal w/b descriptor */ + if (likely(p->des3 & TDES3_RS1V)) { + if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) { + int i = 0; + + /* Check if timestamp is OK from context descriptor */ + do { + ret = dwmac4_rx_check_timestamp(desc); + if (ret < 0) + goto exit; + i++; + + } while ((ret == 1) || (i < 10)); + + if (i == 10) + ret = -EBUSY; + } + } +exit: + return ret; } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, @@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, - .get_timestamp = dwmac4_wrback_get_timestamp, - .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, + .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, + .get_timestamp = dwmac4_get_timestamp, .set_tx_ic = dwmac4_rd_set_tx_ic, .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 0902a2edeaa9..9736c505211a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -59,10 +59,13 @@ #define TDES3_CTXT_TCMSSV BIT(26) /* TDES3 Common */ +#define TDES3_RS1V BIT(26) +#define TDES3_RS1V_SHIFT 26 #define TDES3_LAST_DESCRIPTOR BIT(28) #define TDES3_LAST_DESCRIPTOR_SHIFT 28 #define TDES3_FIRST_DESCRIPTOR BIT(29) #define TDES3_CONTEXT_TYPE BIT(30) +#define TDES3_CONTEXT_TYPE_SHIFT 30 /* TDS3 use for both format (read and write back) */ #define TDES3_OWN BIT(31) @@ -117,6 +120,7 @@ #define RDES3_LAST_DESCRIPTOR BIT(28) #define RDES3_FIRST_DESCRIPTOR BIT(29) #define RDES3_CONTEXT_DESCRIPTOR BIT(30) +#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 /* RDES3 (read format) */ #define RDES3_BUFFER1_VALID_ADDR BIT(24) diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 38f19c99cf59..e75549327c34 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, x->ipv4_pkt_rcvd++; if (rdes4 & ERDES4_IPV6_PKT_RCVD) x->ipv6_pkt_rcvd++; - if (message_type == RDES_EXT_SYNC) - x->rx_msg_type_sync++; + + if (message_type == RDES_EXT_NO_PTP) + x->no_ptp_rx_msg_type_ext++; + else if (message_type == RDES_EXT_SYNC) + x->ptp_rx_msg_type_sync++; else if (message_type == RDES_EXT_FOLLOW_UP) - x->rx_msg_type_follow_up++; + x->ptp_rx_msg_type_follow_up++; else if (message_type == RDES_EXT_DELAY_REQ) - x->rx_msg_type_delay_req++; + x->ptp_rx_msg_type_delay_req++; else if (message_type == RDES_EXT_DELAY_RESP) - x->rx_msg_type_delay_resp++; + x->ptp_rx_msg_type_delay_resp++; else if (message_type == RDES_EXT_PDELAY_REQ) - 
x->rx_msg_type_pdelay_req++; + x->ptp_rx_msg_type_pdelay_req++; else if (message_type == RDES_EXT_PDELAY_RESP) - x->rx_msg_type_pdelay_resp++; + x->ptp_rx_msg_type_pdelay_resp++; else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) - x->rx_msg_type_pdelay_follow_up++; - else - x->rx_msg_type_ext_no_ptp++; + x->ptp_rx_msg_type_pdelay_follow_up++; + else if (message_type == RDES_PTP_ANNOUNCE) + x->ptp_rx_msg_type_announce++; + else if (message_type == RDES_PTP_MANAGEMENT) + x->ptp_rx_msg_type_management++; + else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) + x->ptp_rx_msg_pkt_reserved_type++; + if (rdes4 & ERDES4_PTP_FRAME_TYPE) x->ptp_frame_type++; if (rdes4 & ERDES4_PTP_VER) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b15fc55f1b96..4d2a759b8465 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -129,6 +129,7 @@ struct stmmac_priv { int irq_wake; spinlock_t ptp_lock; void __iomem *mmcaddr; + void __iomem *ptpaddr; u32 rx_tail_addr; u32 tx_tail_addr; u32 mss; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1e06173fc9d7..c5d0142adda2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(ip_csum_bypassed), STMMAC_STAT(ipv4_pkt_rcvd), STMMAC_STAT(ipv6_pkt_rcvd), - STMMAC_STAT(rx_msg_type_ext_no_ptp), - STMMAC_STAT(rx_msg_type_sync), - STMMAC_STAT(rx_msg_type_follow_up), - STMMAC_STAT(rx_msg_type_delay_req), - STMMAC_STAT(rx_msg_type_delay_resp), - STMMAC_STAT(rx_msg_type_pdelay_req), - STMMAC_STAT(rx_msg_type_pdelay_resp), - STMMAC_STAT(rx_msg_type_pdelay_follow_up), + STMMAC_STAT(no_ptp_rx_msg_type_ext), + STMMAC_STAT(ptp_rx_msg_type_sync), + STMMAC_STAT(ptp_rx_msg_type_follow_up), + STMMAC_STAT(ptp_rx_msg_type_delay_req), + STMMAC_STAT(ptp_rx_msg_type_delay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_req), + STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), + STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), + STMMAC_STAT(ptp_rx_msg_type_announce), + STMMAC_STAT(ptp_rx_msg_type_management), + STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), STMMAC_STAT(ptp_frame_type), STMMAC_STAT(ptp_ver), STMMAC_STAT(timestamp_dropped), diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index a77f68918010..10d6059b2f26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) } static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, - u32 ptp_clock) + u32 ptp_clock, int gmac4) { u32 value = readl(ioaddr + PTP_TCR); unsigned long data; - /* Convert the ptp_clock to nano second - * formula = (2/ptp_clock) * 1000000000 - * where, ptp_clock = 50MHz. 
+ /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second + * formula = (1/ptp_clock) * 1000000000 + * where ptp_clock is 50MHz if fine method is used to update system */ - data = (2000000000ULL / ptp_clock); + if (value & PTP_TCR_TSCFUPDT) + data = (1000000000ULL / 50000000); + else + data = (1000000000ULL / ptp_clock); /* 0.465ns accuracy */ if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; + data &= PTP_SSIR_SSINC_MASK; + + if (gmac4) + data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; + writel(data, ioaddr + PTP_SSIR); return data; @@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) } static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, - int add_sub) + int add_sub, int gmac4) { u32 value; int limit; + if (add_sub) { + /* If the new sec value needs to be subtracted with + * the system time, then MAC_STSUR reg should be + * programmed with (2^32 – ) + */ + if (gmac4) + sec = (100000000ULL - sec); + + value = readl(ioaddr + PTP_TCR); + if (value & PTP_TCR_TSCTRLSSR) + nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); + else + nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); + } + writel(sec, ioaddr + PTP_STSUR); - writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), - ioaddr + PTP_STNSUR); + value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; + writel(value, ioaddr + PTP_STNSUR); + /* issue command to initialize the system time value */ value = readl(ioaddr + PTP_TCR); value |= PTP_TCR_TSUPDT; @@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr) { u64 ns; + /* Get the TSSS value */ ns = readl(ioaddr + PTP_STNSR); - /* convert sec time value to nanosecond */ + /* Get the TSS and convert sec time value to nanosecond */ ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; return ns; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48e71fad4210..caf069a465f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -340,18 +340,17 @@ out: /* stmmac_get_tx_hwtstamp - get HW TX timestamps * @priv: driver private structure - * @entry : descriptor index to be used. + * @p : descriptor pointer * @skb : the socket buffer * Description : * This function will read timestamp from the descriptor & pass it to stack. * and also perform some sanity checks. 
*/ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, - unsigned int entry, struct sk_buff *skb) + struct dma_desc *p, struct sk_buff *skb) { struct skb_shared_hwtstamps shhwtstamp; u64 ns; - void *desc = NULL; if (!priv->hwts_tx_en) return; @@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) return; - if (priv->adv_ts) - desc = (priv->dma_etx + entry); - else - desc = (priv->dma_tx + entry); - /* check tx tstamp status */ - if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) - return; + if (!priv->hw->desc->get_tx_timestamp_status(p)) { + /* get the valid tstamp */ + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); - /* get the valid tstamp */ - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); + memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp.hwtstamp = ns_to_ktime(ns); - memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamp.hwtstamp = ns_to_ktime(ns); - /* pass tstamp to stack */ - skb_tstamp_tx(skb, &shhwtstamp); + netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); + /* pass tstamp to stack */ + skb_tstamp_tx(skb, &shhwtstamp); + } return; } /* stmmac_get_rx_hwtstamp - get HW RX timestamps * @priv: driver private structure - * @entry : descriptor index to be used. + * @p : descriptor pointer + * @np : next descriptor pointer * @skb : the socket buffer * Description : * This function will read received packet's timestamp from the descriptor * and pass it to stack. It also perform some sanity checks. */ -static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, - unsigned int entry, struct sk_buff *skb) +static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, + struct dma_desc *np, struct sk_buff *skb) { struct skb_shared_hwtstamps *shhwtstamp = NULL; u64 ns; - void *desc = NULL; if (!priv->hwts_rx_en) return; - if (priv->adv_ts) - desc = (priv->dma_erx + entry); - else - desc = (priv->dma_rx + entry); + /* Check if timestamp is available */ + if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + /* For GMAC4, the valid timestamp is from CTX next desc. 
*/ + if (priv->plat->has_gmac4) + ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); + else + ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); - /* exit if rx tstamp is not valid */ - if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) - return; - - /* get valid tstamp */ - ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); - shhwtstamp = skb_hwtstamps(skb); - memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); - shhwtstamp->hwtstamp = ns_to_ktime(ns); + netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); + shhwtstamp = skb_hwtstamps(skb); + memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); + shhwtstamp->hwtstamp = ns_to_ktime(ns); + } else { + netdev_err(priv->dev, "cannot get RX hw timestamp\n"); + } } /** @@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; if (!priv->hwts_tx_en && !priv->hwts_rx_en) - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); else { value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | tstamp_all | ptp_v2 | ptp_over_ethernet | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | ts_master_en | snap_type_sel); - priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); + priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); /* program Sub Second Increment reg */ sec_inc = priv->hw->ptp->config_sub_second_increment( - priv->ioaddr, priv->clk_ptp_rate); + priv->ptpaddr, priv->clk_ptp_rate, + priv->plat->has_gmac4); temp = div_u64(1000000000ULL, sec_inc); /* calculate default added value: @@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) */ temp = (u64)(temp << 32); priv->default_addend = div_u64(temp, priv->clk_ptp_rate); - priv->hw->ptp->config_addend(priv->ioaddr, + priv->hw->ptp->config_addend(priv->ptpaddr, priv->default_addend); /* initialize system time */ ktime_get_real_ts64(&now); /* lower 32 bits of tv_sec are safe until y2106 */ - priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, + priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); } @@ -880,6 +877,13 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } + /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid + * subsequent PHY polling, make sure we force a link transition if + * we have a UP/DOWN/UP transition + */ + if (phydev->is_pseudo_fixed_link) + phydev->irq = PHY_POLL; + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); @@ -1333,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; } - stmmac_get_tx_hwtstamp(priv, entry, skb); + stmmac_get_tx_hwtstamp(priv, p, skb); } if (likely(priv->tx_skbuff_dma[entry].buf)) { @@ -1479,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - if (priv->synopsys_id >= DWMAC_CORE_4_00) + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; - else + } else { + priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + } dwmac_mmc_intr_all_mask(priv->mmcaddr); @@ -2477,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, 
int limit) if (netif_msg_rx_status(priv)) { void *rx_head; - pr_debug("%s: descriptor ring:\n", __func__); + pr_info(">>>>>> %s: descriptor ring:\n", __func__); if (priv->extend_desc) rx_head = (void *)priv->dma_erx; else @@ -2488,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) while (count < limit) { int status; struct dma_desc *p; + struct dma_desc *np; if (priv->extend_desc) p = (struct dma_desc *)(priv->dma_erx + entry); @@ -2507,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) next_entry = priv->cur_rx; if (priv->extend_desc) - prefetch(priv->dma_erx + next_entry); + np = (struct dma_desc *)(priv->dma_erx + next_entry); else - prefetch(priv->dma_rx + next_entry); + np = priv->dma_rx + next_entry; + + prefetch(np); if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, @@ -2561,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len -= ETH_FCS_LEN; if (netif_msg_rx_status(priv)) { - pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", + pr_info("\tdesc: %p [entry %d] buff=0x%x\n", p, entry, des); if (frame_len > ETH_FRAME_LEN) pr_debug("\tframe size %d, COE: %d\n", @@ -2618,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) DMA_FROM_DEVICE); } - stmmac_get_rx_hwtstamp(priv, entry, skb); - if (netif_msg_pktdata(priv)) { pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); } + stmmac_get_rx_hwtstamp(priv, p, np, skb); + stmmac_rx_vlan(priv->dev, skb); skb->protocol = eth_type_trans(skb, priv->dev); @@ -3406,7 +3416,6 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); - of_node_put(priv->plat->phy_node); if (priv->stmmac_rst) reset_control_assert(priv->stmmac_rst); clk_disable_unprepare(priv->pclk); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 0a0d6a86f397..a840818bf4df 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -200,7 +200,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, /** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure - * @plat: driver data platform structure * @mac: MAC address to use * Description: * this function is to read the driver parameters from device-tree and @@ -306,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - of_node_put(plat->phy_node); + stmmac_remove_config_dt(pdev, plat); return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; @@ -329,14 +328,37 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) return plat; } + +/** + * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() + * @pdev: platform_device structure + * @plat: driver data platform structure + * + * Release resources claimed by stmmac_probe_config_dt(). 
+ */ +void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ + struct device_node *np = pdev->dev.of_node; + + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(plat->phy_node); +} #else struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) { return ERR_PTR(-ENOSYS); } + +void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) +{ +} #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); +EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res) @@ -392,10 +414,13 @@ int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); + struct plat_stmmacenet_data *plat = priv->plat; int ret = stmmac_dvr_remove(&pdev->dev); - if (priv->plat->exit) - priv->plat->exit(pdev, priv->plat->bsp_priv); + if (plat->exit) + plat->exit(pdev, plat->bsp_priv); + + stmmac_remove_config_dt(pdev, plat); return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 64e147f53a9c..b72eb0de57b7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -23,6 +23,8 @@ struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, const char **mac); +void stmmac_remove_config_dt(struct platform_device *pdev, + struct plat_stmmacenet_data *plat); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 1477471f8d44..3eb281d1db08 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) spin_lock_irqsave(&priv->ptp_lock, flags); - priv->hw->ptp->config_addend(priv->ioaddr, addend); + priv->hw->ptp->config_addend(priv->ptpaddr, addend); spin_unlock_irqrestore(&priv->ptp_lock, flags); @@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) spin_lock_irqsave(&priv->ptp_lock, flags); - priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); + priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, + priv->plat->has_gmac4); spin_unlock_irqrestore(&priv->ptp_lock, flags); @@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) spin_lock_irqsave(&priv->ptp_lock, flags); - ns = priv->hw->ptp->get_systime(priv->ioaddr); + ns = priv->hw->ptp->get_systime(priv->ptpaddr); spin_unlock_irqrestore(&priv->ptp_lock, flags); @@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, spin_lock_irqsave(&priv->ptp_lock, flags); - priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); + priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); spin_unlock_irqrestore(&priv->ptp_lock, flags); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 4535df37c227..c06938c47af5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -22,51 +22,53 @@ Author: Rayagond Kokatanur 
******************************************************************************/ -#ifndef __STMMAC_PTP_H__ -#define __STMMAC_PTP_H__ +#ifndef __STMMAC_PTP_H__ +#define __STMMAC_PTP_H__ + +#define PTP_GMAC4_OFFSET 0xb00 +#define PTP_GMAC3_X_OFFSET 0x700 /* IEEE 1588 PTP register offsets */ -#define PTP_TCR 0x0700 /* Timestamp Control Reg */ -#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ -#define PTP_STSR 0x0708 /* System Time – Seconds Regr */ -#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */ -#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */ -#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */ -#define PTP_TAR 0x0718 /* Timestamp Addend Reg */ -#define PTP_TTSR 0x071C /* Target Time Seconds Reg */ -#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ -#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ -#define PTP_TSR 0x0728 /* Timestamp Status */ +#define PTP_TCR 0x00 /* Timestamp Control Reg */ +#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ +#define PTP_STSR 0x08 /* System Time – Seconds Regr */ +#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ +#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ +#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ +#define PTP_TAR 0x18 /* Timestamp Addend Reg */ -#define PTP_STNSUR_ADDSUB_SHIFT 31 - -/* PTP TCR defines */ -#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ -#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ -#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ -#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ -/* Timestamp Interrupt Trigger Enable */ -#define PTP_TCR_TSTRIG 0x00000010 -#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ -#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ -/* Timestamp Digital or Binary Rollover Control */ -#define PTP_TCR_TSCTRLSSR 0x00000200 +#define PTP_STNSUR_ADDSUB_SHIFT 31 +#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ +#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ +/* PTP Timestamp control register defines */ +#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ +#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ +#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ +#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ +#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ +#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ +#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ +#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ /* Enable PTP packet Processing for Version 2 Format */ -#define PTP_TCR_TSVER2ENA 0x00000400 +#define PTP_TCR_TSVER2ENA BIT(10) /* Enable Processing of PTP over Ethernet Frames */ -#define PTP_TCR_TSIPENA 0x00000800 +#define PTP_TCR_TSIPENA BIT(11) /* Enable Processing of PTP Frames Sent over IPv6-UDP */ -#define PTP_TCR_TSIPV6ENA 0x00001000 +#define PTP_TCR_TSIPV6ENA BIT(12) /* Enable Processing of PTP Frames Sent over IPv4-UDP */ -#define PTP_TCR_TSIPV4ENA 0x00002000 +#define PTP_TCR_TSIPV4ENA BIT(13) /* Enable Timestamp Snapshot for Event Messages */ -#define PTP_TCR_TSEVNTENA 0x00004000 +#define PTP_TCR_TSEVNTENA BIT(14) /* Enable Snapshot for Messages Relevant to Master */ -#define PTP_TCR_TSMSTRENA 0x00008000 +#define PTP_TCR_TSMSTRENA BIT(15) /* Select PTP packets for Taking Snapshots */ -#define PTP_TCR_SNAPTYPSEL_1 0x00010000 +#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) 
/* Enable MAC address for PTP Frame Filtering */ -#define PTP_TCR_TSENMACADDR 0x00040000 +#define PTP_TCR_TSENMACADDR BIT(18) -#endif /* __STMMAC_PTP_H__ */ +/* SSIR defines */ +#define PTP_SSIR_SSINC_MASK 0xff +#define GMAC4_PTP_SSIR_SSINC_SHIFT 16 + +#endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index aa4f9d2d8fa9..02f452730d52 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq) void __iomem *gregs = bp->gregs; void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; + __u32 bblk_dvma = (__u32)bp->bblock_dvma; unsigned char *e = &bp->dev->dev_addr[0]; /* Latch current counters into statistics. */ @@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq) bregs + BMAC_XIFCFG); /* Tell the QEC where the ring descriptors are. */ - sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), + sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), cregs + CREG_RXDS); - sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), + sbus_writel(bblk_dvma + bib_offset(be_txd, 0), cregs + CREG_TXDS); /* Setup the FIFO pointers into QEC local memory. */ diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h index 06dd21707353..532fc56830cf 100644 --- a/drivers/net/ethernet/sun/sunbmac.h +++ b/drivers/net/ethernet/sun/sunbmac.h @@ -291,7 +291,7 @@ struct bigmac { void __iomem *bregs; /* BigMAC Registers */ void __iomem *tregs; /* BigMAC Transceiver */ struct bmac_init_block *bmac_block; /* RX and TX descriptors */ - __u32 bblock_dvma; /* RX and TX descriptors */ + dma_addr_t bblock_dvma; /* RX and TX descriptors */ spinlock_t lock; diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 9b825780b3be..9582948145c1 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c @@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep) { struct qe_init_block *qb = qep->qe_block; struct sunqe_buffers *qbufs = qep->buffers; - __u32 qbufs_dvma = qep->buffers_dvma; + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int i; qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; @@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq) void __iomem *mregs = qep->mregs; void __iomem *gregs = qecp->gregs; unsigned char *e = &qep->dev->dev_addr[0]; + __u32 qblk_dvma = (__u32)qep->qblock_dvma; u32 tmp; int i; @@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq) return -EAGAIN; /* Setup initial rx/tx init block pointers. */ - sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); - sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); + sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); + sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); /* Enable/mask the various irq's. 
*/ sbus_writel(0, cregs + CREG_RIMASK); @@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep) struct net_device *dev = qep->dev; struct qe_rxd *this; struct sunqe_buffers *qbufs = qep->buffers; - __u32 qbufs_dvma = qep->buffers_dvma; + __u32 qbufs_dvma = (__u32)qep->buffers_dvma; int elem = qep->rx_new; u32 flags; @@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sunqe *qep = netdev_priv(dev); struct sunqe_buffers *qbufs = qep->buffers; - __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; + __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; unsigned char *txbuf; int len, entry; diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h index 581781b6b2fa..ae190b77431b 100644 --- a/drivers/net/ethernet/sun/sunqe.h +++ b/drivers/net/ethernet/sun/sunqe.h @@ -334,12 +334,12 @@ struct sunqe { void __iomem *qcregs; /* QEC per-channel Registers */ void __iomem *mregs; /* Per-channel MACE Registers */ struct qe_init_block *qe_block; /* RX and TX descriptors */ - __u32 qblock_dvma; /* RX and TX descriptors */ + dma_addr_t qblock_dvma; /* RX and TX descriptors */ spinlock_t lock; /* Protects txfull state */ int rx_new, rx_old; /* RX ring extents */ int tx_new, tx_old; /* TX ring extents */ struct sunqe_buffers *buffers; /* CPU visible address. */ - __u32 buffers_dvma; /* DVMA visible address. */ + dma_addr_t buffers_dvma; /* DVMA visible address. */ struct sunqec *parent; u8 mconfig; /* Base MACE mconfig value */ struct platform_device *op; /* QE's OF device struct */ diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 5eedac495077..97d64bfed465 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -33,7 +33,6 @@ #include #include -#include #include #include #include @@ -43,7 +42,6 @@ #include #include -#include #include #include @@ -2883,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev) ret = of_get_phy_mode(lp->pdev->dev.of_node); if (ret < 0) { dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } lp->phy_interface = ret; @@ -2891,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev) ret = dwceqos_mii_init(lp); if (ret) { dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } ret = dwceqos_mii_probe(ndev); if (ret != 0) { netdev_err(ndev, "mii_probe fail.\n"); ret = -ENXIO; - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); @@ -2916,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", ret); - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", pdev->id, ndev->base_addr, ndev->irq); @@ -2926,7 +2924,7 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", ndev->irq, ret); - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } if (netif_msg_probe(lp)) @@ -2937,11 +2935,14 @@ static int dwceqos_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_clk_dis_phy; + goto err_out_deregister_fixed_link; } return 0; 
+err_out_deregister_fixed_link: + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); err_out_clk_dis_phy: clk_disable_unprepare(lp->phy_ref_clk); err_out_clk_dis_aper: @@ -2961,8 +2962,11 @@ static int dwceqos_remove(struct platform_device *pdev) if (ndev) { lp = netdev_priv(ndev); - if (ndev->phydev) + if (ndev->phydev) { phy_disconnect(ndev->phydev); + if (of_phy_is_fixed_link(pdev->dev.of_node)) + of_phy_deregister_fixed_link(pdev->dev.of_node); + } mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 054a8dd23dae..ba1e45ff6aae 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) } dev = bus_find_device(&platform_bus_type, NULL, node, match); + of_node_put(node); priv = dev_get_drvdata(dev); priv->cpsw_phy_sel(priv, phy_mode, slave); + + put_device(dev); } EXPORT_SYMBOL_GPL(cpsw_phy_sel); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c6cff3d2ff05..b9087b828eff 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, * to the PHY is the Ethernet MAC DT node. */ ret = of_phy_register_fixed_link(slave_node); - if (ret) + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); return ret; + } slave_data->phy_node = of_node_get(slave_node); } else if (parp) { u32 phyid; @@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); + put_device(&mdio->dev); } else { dev_err(&pdev->dev, "No slave[%d] phy_id, phy-handle, or fixed-link property\n", @@ -2440,6 +2444,34 @@ no_phy_slave: return 0; } +static void cpsw_remove_dt(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_platform_data *data = &cpsw->data; + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0; + + for_each_available_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = &data->slave_data[i]; + + if (strcmp(slave_node->name, "slave")) + continue; + + if (of_phy_is_fixed_link(slave_node)) + of_phy_deregister_fixed_link(slave_node); + + of_node_put(slave_data->phy_node); + + i++; + if (i == data->slaves) + break; + } + + of_platform_depopulate(&pdev->dev); +} + static int cpsw_probe_dual_emac(struct cpsw_priv *priv) { struct cpsw_common *cpsw = priv->cpsw; @@ -2547,6 +2579,9 @@ static int cpsw_probe(struct platform_device *pdev) int irq; cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); + if (!cpsw) + return -ENOMEM; + cpsw->dev = &pdev->dev; ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); @@ -2584,11 +2619,19 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&cpsw->data, pdev)) { - dev_err(&pdev->dev, "cpsw: platform data missing\n"); - ret = -ENODEV; + /* Need to enable clocks with runtime PM api to access module + * registers + */ + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); goto 
clean_runtime_disable_ret; } + + ret = cpsw_probe_dt(&cpsw->data, pdev); + if (ret) + goto clean_dt_ret; + data = &cpsw->data; cpsw->rx_ch_num = 1; cpsw->tx_ch_num = 1; @@ -2608,7 +2651,7 @@ static int cpsw_probe(struct platform_device *pdev) GFP_KERNEL); if (!cpsw->slaves) { ret = -ENOMEM; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } for (i = 0; i < data->slaves; i++) cpsw->slaves[i].slave_num = i; @@ -2620,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev) if (IS_ERR(clk)) { dev_err(priv->dev, "fck is not found\n"); ret = -ENODEV; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; @@ -2628,26 +2671,17 @@ static int cpsw_probe(struct platform_device *pdev) ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); if (IS_ERR(ss_regs)) { ret = PTR_ERR(ss_regs); - goto clean_runtime_disable_ret; + goto clean_dt_ret; } cpsw->regs = ss_regs; - /* Need to enable clocks with runtime PM api to access module - * registers - */ - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - pm_runtime_put_noidle(&pdev->dev); - goto clean_runtime_disable_ret; - } cpsw->version = readl(&cpsw->regs->id_ver); - pm_runtime_put_sync(&pdev->dev); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(cpsw->wr_regs)) { ret = PTR_ERR(cpsw->wr_regs); - goto clean_runtime_disable_ret; + goto clean_dt_ret; } memset(&dma_params, 0, sizeof(dma_params)); @@ -2684,7 +2718,7 @@ static int cpsw_probe(struct platform_device *pdev) default: dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); ret = -ENODEV; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } for (i = 0; i < cpsw->data.slaves; i++) { struct cpsw_slave *slave = &cpsw->slaves[i]; @@ -2713,7 +2747,7 @@ static int cpsw_probe(struct platform_device *pdev) if (!cpsw->dma) { dev_err(priv->dev, "error initializing dma\n"); ret = -ENOMEM; - goto clean_runtime_disable_ret; + goto clean_dt_ret; } cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); @@ -2811,16 +2845,23 @@ static int cpsw_probe(struct platform_device *pdev) ret = cpsw_probe_dual_emac(priv); if (ret) { cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_ale_ret; + goto clean_unregister_netdev_ret; } } + pm_runtime_put(&pdev->dev); + return 0; +clean_unregister_netdev_ret: + unregister_netdev(ndev); clean_ale_ret: cpsw_ale_destroy(cpsw->ale); clean_dma_ret: cpdma_ctlr_destroy(cpsw->dma); +clean_dt_ret: + cpsw_remove_dt(pdev); + pm_runtime_put_sync(&pdev->dev); clean_runtime_disable_ret: pm_runtime_disable(&pdev->dev); clean_ndev_ret: @@ -2846,7 +2887,7 @@ static int cpsw_remove(struct platform_device *pdev) cpsw_ale_destroy(cpsw->ale); cpdma_ctlr_destroy(cpsw->dma); - of_platform_depopulate(&pdev->dev); + cpsw_remove_dt(pdev); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); if (cpsw->data.dual_emac) @@ -2889,6 +2930,8 @@ static int cpsw_resume(struct device *dev) /* Select default pin state */ pinctrl_pm_select_default_state(dev); + /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ + rtnl_lock(); if (cpsw->data.dual_emac) { int i; @@ -2900,6 +2943,8 @@ static int cpsw_resume(struct device *dev) if (netif_running(ndev)) cpsw_ndo_open(ndev); } + rtnl_unlock(); + return 0; } #endif diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2fd94a5bc1f3..481c7bf0395b 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ 
b/drivers/net/ethernet/ti/davinci_emac.c @@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev) int i = 0; struct emac_priv *priv = netdev_priv(ndev); struct phy_device *phydev = NULL; + struct device *phy = NULL; ret = pm_runtime_get_sync(&priv->pdev->dev); if (ret < 0) { @@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev) /* use the first phy on the bus if pdata did not give us a phy id */ if (!phydev && !priv->phy_id) { - struct device *phy; - phy = bus_find_device(&mdio_bus_type, NULL, NULL, match_first_device); - if (phy) + if (phy) { priv->phy_id = dev_name(phy); + if (!priv->phy_id || !*priv->phy_id) + put_device(phy); + } } if (!phydev && priv->phy_id && *priv->phy_id) { phydev = phy_connect(ndev, priv->phy_id, &emac_adjust_link, PHY_INTERFACE_MODE_MII); - + put_device(phy); /* reference taken by bus_find_device */ if (IS_ERR(phydev)) { dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_id); @@ -1765,6 +1767,7 @@ static int davinci_emac_try_get_mac(struct platform_device *pdev, */ static int davinci_emac_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; int rc = 0; struct resource *res, *res_ctrl; struct net_device *ndev; @@ -1803,7 +1806,7 @@ static int davinci_emac_probe(struct platform_device *pdev) if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); rc = -ENODEV; - goto no_pdata; + goto err_free_netdev; } /* MAC addr and PHY mask , RMII enable info from platform_data */ @@ -1939,6 +1942,10 @@ no_cpdma_chan: cpdma_chan_destroy(priv->rxchan); cpdma_ctlr_destroy(priv->dma); no_pdata: + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); + of_node_put(priv->phy_node); +err_free_netdev: free_netdev(ndev); return rc; } @@ -1954,6 +1961,7 @@ static int davinci_emac_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct emac_priv *priv = netdev_priv(ndev); + struct device_node *np = pdev->dev.of_node; dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); @@ -1966,6 +1974,8 @@ static int davinci_emac_remove(struct platform_device *pdev) unregister_netdev(ndev); of_node_put(priv->phy_node); pm_runtime_disable(&pdev->dev); + if (of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 446ea580ad42..928c1dca2673 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) pr_debug("%s: bssid matched\n", __func__); break; } else { - pr_debug("%s: bssid unmached\n", __func__); + pr_debug("%s: bssid unmatched\n", __func__); continue; } } diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7f127dc1b7ba..fa32391720fe 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget) if (!qmgr_stat_below_low_watermark(rxq) && napi_reschedule(napi)) { /* not empty again */ #if DEBUG_RX - printk(KERN_DEBUG "%s: eth_poll" - " napi_reschedule successed\n", + printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n", dev->name); #endif qmgr_disable_irq(rxq); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 42edd7b7902f..8b4822ad27cb 100644 --- 
a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -859,7 +859,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs4; struct rtable *rt = NULL; - const struct iphdr *iip; /* interior IP header */ int err = -EINVAL; struct flowi4 fl4; __u8 tos, ttl; @@ -890,8 +889,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); - iip = ip_hdr(skb); - if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; @@ -911,7 +908,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto tx_error; - tos = ip_tunnel_ecn_encap(key->tos, iip, skb); + tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { @@ -920,7 +917,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto tx_error; - tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb); + tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); ttl = geneve->ttl; if (!ttl && IN_MULTICAST(ntohl(fl4.daddr))) ttl = 1; @@ -952,7 +949,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, { struct geneve_dev *geneve = netdev_priv(dev); struct dst_entry *dst = NULL; - const struct iphdr *iip; /* interior IP header */ struct geneve_sock *gs6; int err = -EINVAL; struct flowi6 fl6; @@ -982,8 +978,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); skb_reset_mac_header(skb); - iip = ip_hdr(skb); - if (info) { const struct ip_tunnel_key *key = &info->key; u8 *opts = NULL; @@ -1004,7 +998,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (unlikely(err)) goto tx_error; - prio = ip_tunnel_ecn_encap(key->tos, iip, skb); + prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; label = info->key.label; } else { @@ -1014,7 +1008,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, goto tx_error; prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), - iip, skb); + ip_hdr(skb), skb); ttl = geneve->ttl; if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) ttl = 1; diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 9fa7ac9f8e68..f355df7cf84a 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f442eb366863..0fef17874d50 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -497,6 +497,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct net_device *phy_dev; int err; u16 mode = IPVLAN_MODE_L3; + bool create = false; if (!tb[IFLA_LINK]) return -EINVAL; @@ -513,6 +514,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, err = ipvlan_port_create(phy_dev); if (err < 0) return err; + create = true; } if (data && data[IFLA_IPVLAN_MODE]) @@ -536,22 +538,27 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, err = register_netdevice(dev); if (err < 0) - return err; + goto destroy_ipvlan_port; err = 
netdev_upper_dev_link(phy_dev, dev); if (err) { - unregister_netdevice(dev); - return err; + goto unregister_netdev; } err = ipvlan_set_port_mode(port, mode); if (err) { - unregister_netdevice(dev); - return err; + goto unregister_netdev; } list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); netif_stacked_transfer_operstate(phy_dev, dev); return 0; + +unregister_netdev: + unregister_netdevice(dev); +destroy_ipvlan_port: + if (create) + ipvlan_port_destroy(phy_dev); + return err; } static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c index 4e3d2e7c697c..e8c3a8c32534 100644 --- a/drivers/net/irda/w83977af_ir.c +++ b/drivers/net/irda/w83977af_ir.c @@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb, mtt = irda_get_mtt(skb); pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); - if (mtt) + if (mtt > 1000) + mdelay(mtt/1000); + else if (mtt) udelay(mtt); /* Enable DMA interrupt */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 3234fcdea317..26d6f0bbe14b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -623,7 +623,8 @@ hash_add: return 0; clear_multi: - dev_set_allmulti(lowerdev, -1); + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, -1); del_unicast: dev_uc_del(lowerdev, dev->dev_addr); out: @@ -1278,6 +1279,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct net_device *lowerdev; int err; int macmode; + bool create = false; if (!tb[IFLA_LINK]) return -EINVAL; @@ -1304,12 +1306,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, err = macvlan_port_create(lowerdev); if (err < 0) return err; + create = true; } port = macvlan_port_get_rtnl(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ - if (port->passthru) - return -EINVAL; + if (port->passthru) { + /* The macvlan port must be not created this time, + * still goto destroy_macvlan_port for readability. 
+ */ + err = -EINVAL; + goto destroy_macvlan_port; + } vlan->lowerdev = lowerdev; vlan->dev = dev; @@ -1325,24 +1333,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { - if (port->count) - return -EINVAL; + if (port->count) { + err = -EINVAL; + goto destroy_macvlan_port; + } port->passthru = true; eth_hw_addr_inherit(dev, lowerdev); } if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { - if (vlan->mode != MACVLAN_MODE_SOURCE) - return -EINVAL; + if (vlan->mode != MACVLAN_MODE_SOURCE) { + err = -EINVAL; + goto destroy_macvlan_port; + } macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); err = macvlan_changelink_sources(vlan, macmode, data); if (err) - return err; + goto destroy_macvlan_port; } err = register_netdevice(dev); if (err < 0) - return err; + goto destroy_macvlan_port; dev->priv_flags |= IFF_MACVLAN; err = netdev_upper_dev_link(lowerdev, dev); @@ -1357,7 +1369,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, unregister_netdev: unregister_netdevice(dev); - +destroy_macvlan_port: + if (create) + macvlan_port_destroy(port->dev); return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 070e3290aa6e..7869b0651576 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -491,7 +491,13 @@ static int macvtap_newlink(struct net *src_net, /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. */ - return macvlan_common_newlink(src_net, dev, tb, data); + err = macvlan_common_newlink(src_net, dev, tb, data); + if (err) { + netdev_rx_handler_unregister(dev); + return err; + } + + return 0; } static void macvtap_dellink(struct net_device *dev, @@ -736,13 +742,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (zerocopy) err = zerocopy_sg_from_iter(skb, from); - else { + else err = skb_copy_datagram_from_iter(skb, 0, from, len); - if (!err && m && m->msg_control) { - struct ubuf_info *uarg = m->msg_control; - uarg->callback(uarg, false); - } - } if (err) goto err_kfree; @@ -773,7 +774,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb_shinfo(skb)->destructor_arg = m->msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; + } else if (m && m->msg_control) { + struct ubuf_info *uarg = m->msg_control; + uarg->callback(uarg, false); } + if (vlan) { skb->dev = vlan->dev; dev_queue_xmit(skb); diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index c649c101bbab..eb5167210681 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register); void fixed_phy_unregister(struct phy_device *phy) { phy_device_remove(phy); - + of_node_put(phy->mdio.dev.of_node); fixed_phy_del(phy->mdio.addr); } EXPORT_SYMBOL_GPL(fixed_phy_unregister); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 081df68d2ce1..ea92d524d5a8 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -318,12 +318,12 @@ static int ksz8041_config_init(struct phy_device *phydev) /* Limit supported and advertised modes in fiber mode */ if (of_property_read_bool(of_node, "micrel,fiber-mode")) { phydev->dev_flags |= MICREL_PHY_FXEN; - phydev->supported &= SUPPORTED_FIBRE | - SUPPORTED_100baseT_Full | + phydev->supported &= 
SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half; - phydev->advertising &= ADVERTISED_FIBRE | - ADVERTISED_100baseT_Full | + phydev->supported |= SUPPORTED_FIBRE; + phydev->advertising &= ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half; + phydev->advertising |= ADVERTISED_FIBRE; phydev->autoneg = AUTONEG_DISABLE; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e977ba931878..1a4bf8acad78 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, phydev = to_phy_device(d); rc = phy_connect_direct(dev, phydev, handler, interface); + put_device(d); if (rc) return ERR_PTR(rc); @@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, phydev = to_phy_device(d); rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); + put_device(d); if (rc) return ERR_PTR(rc); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index aadd6e9f54ad..9cbe645e3d89 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -102,15 +102,19 @@ static int rtl8211f_config_init(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { - /* enable TXDLY */ - phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); - reg = phy_read(phydev, 0x11); + phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); + reg = phy_read(phydev, 0x11); + + /* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) reg |= RTL8211F_TX_DELAY; - phy_write(phydev, 0x11, reg); - /* restore to default page 0 */ - phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); - } + else + reg &= ~RTL8211F_TX_DELAY; + + phy_write(phydev, 0x11, reg); + /* restore to default page 0 */ + phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); return 0; } diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 2e37eb337d48..24b4a09468dd 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -62,6 +62,10 @@ /* Vitesse Extended Page Access Register */ #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f +/* Vitesse VSC8601 Extended PHY Control Register 1 */ +#define MII_VSC8601_EPHY_CTL 0x17 +#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8) + #define PHY_ID_VSC8234 0x000fc620 #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 @@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev) return err; } +/* This adds a skew for both TX and RX clocks, so the skew should only be + * applied to "rgmii-id" interfaces. It may not work as expected + * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. 
*/ +static int vsc8601_add_skew(struct phy_device *phydev) +{ + int ret; + + ret = phy_read(phydev, MII_VSC8601_EPHY_CTL); + if (ret < 0) + return ret; + + ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW; + return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret); +} + +static int vsc8601_config_init(struct phy_device *phydev) +{ + int ret = 0; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + ret = vsc8601_add_skew(phydev); + + if (ret < 0) + return ret; + + return genphy_config_init(phydev); +} + static int vsc824x_ack_interrupt(struct phy_device *phydev) { int err = 0; @@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, - .config_init = &genphy_config_init, + .config_init = &vsc8601_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, .ack_interrupt = &vsc824x_ack_interrupt, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8093e39ae263..db6acecabeaa 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1246,13 +1246,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) err = zerocopy_sg_from_iter(skb, from); - else { + else err = skb_copy_datagram_from_iter(skb, 0, from, len); - if (!err && msg_control) { - struct ubuf_info *uarg = msg_control; - uarg->callback(uarg, false); - } - } if (err) { this_cpu_inc(tun->pcpu_stats->rx_dropped); @@ -1298,6 +1293,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_shinfo(skb)->destructor_arg = msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; + } else if (msg_control) { + struct ubuf_info *uarg = msg_control; + uarg->callback(uarg, false); } skb_reset_network_header(skb); diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index cce24950a0ab..dc7b6392e75a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -603,12 +603,12 @@ static void ax88772_suspend(struct usbnet *dev) u16 medium; /* Stop MAC operation */ - medium = asix_read_medium_status(dev, 0); + medium = asix_read_medium_status(dev, 1); medium &= ~AX_MEDIUM_RE; - asix_write_medium_mode(dev, medium, 0); + asix_write_medium_mode(dev, medium, 1); netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n", - asix_read_medium_status(dev, 0)); + asix_read_medium_status(dev, 1)); /* Preserve BMCR for restoring */ priv->presvd_phy_bmcr = diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index e6338c16081a..8a6675d92b98 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = { .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info cypress_GX3_info = { + .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct driver_info dlink_dub1312_info = { .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", .bind = ax88179_bind, @@ -1717,6 +1730,10 @@ static const struct usb_device_id products[] = { /* ASIX AX88178A 10/100/1000 */ USB_DEVICE(0x0b95, 0x178a), .driver_info = (unsigned long)&ax88178a_info, +}, { + /* Cypress GX3 
SuperSpeed to Gigabit Ethernet Bridge Controller */ + USB_DEVICE(0x04b4, 0x3610), + .driver_info = (unsigned long)&cypress_GX3_info, }, { /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ USB_DEVICE(0x2001, 0x4a00), diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index c47ec0a04c8e..dd623f674487 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -388,12 +388,6 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) case USB_CDC_NOTIFY_NETWORK_CONNECTION: netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", event->wValue ? "on" : "off"); - - /* Work-around for devices with broken off-notifications */ - if (event->wValue && - !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state)) - usbnet_link_change(dev, 0, 0); - usbnet_link_change(dev, !!event->wValue, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ @@ -466,6 +460,36 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) return 1; } +/* Ensure correct link state + * + * Some devices (ZTE MF823/831/910) export two carrier on notifications when + * connected. This causes the link state to be incorrect. Work around this by + * always setting the state to off, then on. + */ +void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb) +{ + struct usb_cdc_notification *event; + + if (urb->actual_length < sizeof(*event)) + return; + + event = urb->transfer_buffer; + + if (event->bNotificationType != USB_CDC_NOTIFY_NETWORK_CONNECTION) { + usbnet_cdc_status(dev, urb); + return; + } + + netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", + event->wValue ? "on" : "off"); + + if (event->wValue && + netif_carrier_ok(dev->net)) + netif_carrier_off(dev->net); + + usbnet_link_change(dev, !!event->wValue, 0); +} + static const struct driver_info cdc_info = { .description = "CDC Ethernet Device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT, @@ -481,7 +505,7 @@ static const struct driver_info zte_cdc_info = { .flags = FLAG_ETHER | FLAG_POINTTOPOINT, .bind = usbnet_cdc_zte_bind, .unbind = usbnet_cdc_unbind, - .status = usbnet_cdc_status, + .status = usbnet_cdc_zte_status, .set_rx_mode = usbnet_cdc_update_filter, .manage_power = usbnet_manage_power, .rx_fixup = usbnet_cdc_zte_rx_fixup, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 3ff76c6db4f6..6fe1cdb0174f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -894,6 +894,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 44d439f50961..efb84f092492 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) u8 checksum = CHECKSUM_NONE; u32 opts2, opts3; - if (tp->version == RTL_VER_01) + if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) goto return_result; opts2 = le32_to_cpu(rx_desc->opts2); @@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) checksum = CHECKSUM_NONE; else checksum = CHECKSUM_UNNECESSARY; - } else if 
(RD_IPV6_CS) { + } else if (opts2 & RD_IPV6_CS) { if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) checksum = CHECKSUM_UNNECESSARY; else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) @@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev) goto out; res = usb_autopm_get_interface(tp->intf); - if (res < 0) { - free_all_mem(tp); - goto out; - } + if (res < 0) + goto out_free; mutex_lock(&tp->control); @@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev) netif_device_detach(tp->netdev); netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", res); - free_all_mem(tp); - } else { - napi_enable(&tp->napi); + goto out_unlock; } + napi_enable(&tp->napi); mutex_unlock(&tp->control); @@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev) tp->pm_notifier.notifier_call = rtl_notifier; register_pm_notifier(&tp->pm_notifier); #endif + return 0; +out_unlock: + mutex_unlock(&tp->control); + usb_autopm_put_interface(tp->intf); +out_free: + free_all_mem(tp); out: return res; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fad84f3f4109..7276d5a95bd0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi) netif_napi_del(&vi->rq[i].napi); } + /* We called napi_hash_del() before netif_napi_del(), + * we need to respect an RCU grace period before freeing vi->rq + */ + synchronize_net(); + kfree(vi->rq); kfree(vi->sq); } @@ -2038,23 +2043,33 @@ static struct virtio_device_id id_table[] = { { 0 }, }; +#define VIRTNET_FEATURES \ + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ + VIRTIO_NET_F_MAC, \ + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ + VIRTIO_NET_F_CTRL_MAC_ADDR, \ + VIRTIO_NET_F_MTU + static unsigned int features[] = { - VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, - VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, - VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, - VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, - VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, - VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, - VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, - VIRTIO_NET_F_CTRL_MAC_ADDR, + VIRTNET_FEATURES, +}; + +static unsigned int features_legacy[] = { + VIRTNET_FEATURES, + VIRTIO_NET_F_GSO, VIRTIO_F_ANY_LAYOUT, - VIRTIO_NET_F_MTU, }; static struct virtio_driver virtio_net_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), + .feature_table_legacy = features_legacy, + .feature_table_size_legacy = ARRAY_SIZE(features_legacy), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f3c2fa3ab0d5..2ba01ca02c9c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -611,6 +611,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; int notify = 0; + int rc; f = __vxlan_find_mac(vxlan, mac); if (f) { @@ -641,8 +642,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if ((flags & NLM_F_APPEND) && (is_multicast_ether_addr(f->eth_addr) || 
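Illustrative aside (not part of the diff): the rtl8152_open() rework above replaces duplicated cleanup calls with a single goto ladder, so each failure path releases exactly what was already acquired and nothing more. A stand-alone sketch of the pattern (the resource names are hypothetical):

	/* User-space sketch of the goto-unwind pattern used in rtl8152_open(). */
	#include <stdio.h>
	#include <stdlib.h>

	static int acquire_buffers(void **buf) { *buf = malloc(64); return *buf ? 0 : -1; }
	static int acquire_link(void) { return 0; }	/* pretend this step can fail */

	static int do_open(void)
	{
		void *buf;
		int ret;

		ret = acquire_buffers(&buf);
		if (ret)
			goto out;		/* nothing acquired yet */

		ret = acquire_link();
		if (ret)
			goto out_free;		/* undo only what was already taken */

		return 0;

	out_free:
		free(buf);
	out:
		return ret;
	}

	int main(void) { return do_open() ? EXIT_FAILURE : EXIT_SUCCESS; }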
is_zero_ether_addr(f->eth_addr))) { - int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, - &rd); + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); if (rc < 0) return rc; @@ -673,7 +673,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, INIT_LIST_HEAD(&f->remotes); memcpy(f->eth_addr, mac, ETH_ALEN); - vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); + if (rc < 0) { + kfree(f); + return rc; + } ++vxlan->addrcnt; hlist_add_head_rcu(&f->hlist, @@ -944,7 +948,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) { struct vxlan_dev *vxlan; struct vxlan_sock *sock4; - struct vxlan_sock *sock6 = NULL; +#if IS_ENABLED(CONFIG_IPV6) + struct vxlan_sock *sock6; +#endif unsigned short family = dev->default_dst.remote_ip.sa.sa_family; sock4 = rtnl_dereference(dev->vn4_sock); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b777e1b2f87a..78d9966a3957 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, /* store current 11d setting */ if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, &ifp->vif->is_11d)) { - supports_11d = false; + is_11d = supports_11d = false; } else { country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, settings->beacon.tail_len, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 4fdc3dad3e85..b88e2048ae0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, ret = iwl_mvm_switch_to_d3(mvm); if (ret) return ret; + } else { + /* In theory, we wouldn't have to stop a running sched + * scan in order to start another one (for + * net-detect). But in practice this doesn't seem to + * work properly, so stop any running sched_scan now. + */ + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); + if (ret) + return ret; } /* rfkill release can be either for wowlan or netdetect */ @@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, out: if (ret < 0) { iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); - ieee80211_restart_hw(mvm->hw); + if (mvm->restart_fw > 0) { + mvm->restart_fw--; + ieee80211_restart_hw(mvm->hw); + } iwl_mvm_free_nd(mvm); } out_noreset: @@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) iwl_mvm_update_changed_regdom(mvm); if (mvm->net_detect) { + /* If this is a non-unified image, we restart the FW, + * so no need to stop the netdetect scan. If that + * fails, continue and try to get the wake-up reasons, + * but trigger a HW restart by keeping a failure code + * in ret. 
+ */ + if (unified_image) + ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, + false); + iwl_mvm_query_netdetect_reasons(mvm, vif); /* has unlocked the mutex, so skip that */ goto out; @@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) { struct iwl_mvm *mvm = inode->i_private; - int remaining_time = 10; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); mvm->d3_test_active = false; @@ -2282,18 +2305,22 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; iwl_abort_notification_waits(&mvm->notif_wait); - ieee80211_restart_hw(mvm->hw); + if (!unified_image) { + int remaining_time = 10; - /* wait for restart and disconnect all interfaces */ - while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - remaining_time > 0) { - remaining_time--; - msleep(1000); + ieee80211_restart_hw(mvm->hw); + + /* wait for restart and disconnect all interfaces */ + while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + remaining_time > 0) { + remaining_time--; + msleep(1000); + } + + if (remaining_time == 0) + IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); } - if (remaining_time == 0) - IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); - ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 07da4efe8458..7b7d2a146e30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, .data = { &cmd, }, .len = { sizeof(cmd) }, }; - size_t delta, len; - ssize_t ret; + size_t delta; + ssize_t ret, len; hcmd.id = iwl_cmd_id(*ppos >> 24 ? 
UMAC_RD_WR : LMAC_RD_WR, DEBUG_GROUP, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 318efd814037..1db1dc13e988 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, struct iwl_mvm_internal_rxq_notif *notif, u32 size) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; int ret; @@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, } if (notif->sync) - ret = wait_event_timeout(notif_waitq, + ret = wait_event_timeout(mvm->rx_sync_waitq, atomic_read(&mvm->queue_sync_counter) == 0, HZ); WARN_ON_ONCE(!ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d17cbf603f7c..c60703e0c246 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -937,6 +937,7 @@ struct iwl_mvm { /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ spinlock_t d0i3_tx_lock; wait_queue_head_t d0i3_exit_waitq; + wait_queue_head_t rx_sync_waitq; /* BT-Coex */ struct iwl_bt_coex_profile_notif last_bt_notif; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 05fe6dd1a2c8..4d35deb628bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, spin_lock_init(&mvm->refs_lock); skb_queue_head_init(&mvm->d0i3_tx); init_waitqueue_head(&mvm->d0i3_exit_waitq); + init_waitqueue_head(&mvm->rx_sync_waitq); atomic_set(&mvm->queue_sync_counter, 0); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a57c6ef5bc14..6c802cee900c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, "Received expired RX queue sync message\n"); return; } - atomic_dec(&mvm->queue_sync_counter); + if (!atomic_dec_return(&mvm->queue_sync_counter)) + wake_up(&mvm->rx_sync_waitq); } switch (internal_notif->type) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index f279fdd6eb44..fa9743205491 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm) static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) { + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + /* This looks a bit arbitrary, but the idea is that if we run * out of possible simultaneous scans and the userspace is * trying to run a scan type that is already running, we @@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EBUSY; return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); case IWL_MVM_SCAN_NETDETECT: - /* No need to stop anything for net-detect since the - * firmware is restarted anyway. This way, any sched - * scans that were running will be restarted when we - * resume. 
- */ - return 0; + /* For non-unified images, there's no need to stop + * anything for net-detect since the firmware is + * restarted anyway. This way, any sched scans that + * were running will be restarted when we resume. + */ + if (!unified_image) + return 0; + + /* If this is a unified image and we ran out of scans, + * we need to stop something. Prefer stopping regular + * scans, because the results are useless at this + * point, and we should be able to keep running + * another scheduled scan while suspended. + */ + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, + true); + if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) + return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, + true); + + /* fall through, something is wrong if no scan was + * running but we ran out of scans. + */ default: WARN_ON(1); break; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 001be406a3d3..2f8134b2a504 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = { MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); #ifdef CONFIG_ACPI -#define SPL_METHOD "SPLC" -#define SPL_DOMAINTYPE_MODULE BIT(0) -#define SPL_DOMAINTYPE_WIFI BIT(1) -#define SPL_DOMAINTYPE_WIGIG BIT(2) -#define SPL_DOMAINTYPE_RFEM BIT(3) +#define ACPI_SPLC_METHOD "SPLC" +#define ACPI_SPLC_DOMAIN_WIFI (0x07) -static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) +static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) { - union acpi_object *limits, *domain_type, *power_limit; + union acpi_object *data_pkg, *dflt_pwr_limit; + int i; - if (splx->type != ACPI_TYPE_PACKAGE || - splx->package.count != 2 || - splx->package.elements[0].type != ACPI_TYPE_INTEGER || - splx->package.elements[0].integer.value != 0) { - IWL_ERR(trans, "Unsupported splx structure\n"); + /* We need at least two elements, one for the revision and one + * for the data itself. Also check that the revision is + * supported (currently only revision 0). + */ + if (splc->type != ACPI_TYPE_PACKAGE || + splc->package.count < 2 || + splc->package.elements[0].type != ACPI_TYPE_INTEGER || + splc->package.elements[0].integer.value != 0) { + IWL_DEBUG_INFO(trans, + "Unsupported structure returned by the SPLC method. Ignoring.\n"); return 0; } - limits = &splx->package.elements[1]; - if (limits->type != ACPI_TYPE_PACKAGE || - limits->package.count < 2 || - limits->package.elements[0].type != ACPI_TYPE_INTEGER || - limits->package.elements[1].type != ACPI_TYPE_INTEGER) { - IWL_ERR(trans, "Invalid limits element\n"); + /* loop through all the packages to find the one for WiFi */ + for (i = 1; i < splc->package.count; i++) { + union acpi_object *domain; + + data_pkg = &splc->package.elements[i]; + + /* Skip anything that is not a package with the right + * amount of elements (i.e. at least 2 integers). 
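Illustrative aside (not part of the diff): the rewritten splc_get_pwr_limit() walks the SPLC package looking for the data package whose first integer names the WiFi domain and returns that package's power limit. Stripped of the ACPI type checking, the walk reduces to the following stand-alone model (the domain code mirrors ACPI_SPLC_DOMAIN_WIFI; the limit values are invented):

	/* User-space model of the SPLC walk: find the WiFi domain's power limit. */
	#include <stdint.h>
	#include <stdio.h>

	#define DOMAIN_WIFI 0x07

	struct splc_entry { uint64_t domain; uint64_t limit; };

	static uint64_t wifi_pwr_limit(const struct splc_entry *e, int count)
	{
		for (int i = 0; i < count; i++)
			if (e[i].domain == DOMAIN_WIFI)
				return e[i].limit;
		return 0;	/* no WiFi entry: keep the default limit */
	}

	int main(void)
	{
		struct splc_entry tbl[] = { { 0x01, 2000 }, { DOMAIN_WIFI, 1600 } };

		printf("limit = %llu\n", (unsigned long long)wifi_pwr_limit(tbl, 2));
		return 0;
	}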
+ */ + if (data_pkg->type != ACPI_TYPE_PACKAGE || + data_pkg->package.count < 2 || + data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || + data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + continue; + + domain = &data_pkg->package.elements[0]; + if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) + break; + + data_pkg = NULL; + } + + if (!data_pkg) { + IWL_DEBUG_INFO(trans, + "No element for the WiFi domain returned by the SPLC method.\n"); return 0; } - domain_type = &limits->package.elements[0]; - power_limit = &limits->package.elements[1]; - if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { - IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); - return 0; - } - - return power_limit->integer.value; + dflt_pwr_limit = &data_pkg->package.elements[1]; + return dflt_pwr_limit->integer.value; } static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) { acpi_handle pxsx_handle; acpi_handle handle; - struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; + struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; pxsx_handle = ACPI_HANDLE(&pdev->dev); @@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) } /* Get the method's handle */ - status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); + status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, + &handle); if (ACPI_FAILURE(status)) { - IWL_DEBUG_INFO(trans, "SPL method not found\n"); + IWL_DEBUG_INFO(trans, "SPLC method not found\n"); return; } /* Call SPLC with no arguments */ - status = acpi_evaluate_object(handle, NULL, NULL, &splx); + status = acpi_evaluate_object(handle, NULL, NULL, &splc); if (ACPI_FAILURE(status)) { IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); return; } - trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); + trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", trans->dflt_pwr_limit); - kfree(splx.pointer); + kfree(splc.pointer); } #else /* CONFIG_ACPI */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index e9a278b60dfd..5f840f16f40b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -592,6 +592,7 @@ error: static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, u32 txq_id) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; txq->need_update = false; @@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return ret; spin_lock_init(&txq->lock); + + if (txq_id == trans_pcie->cmd_queue) { + static struct lock_class_key iwl_pcie_cmd_queue_lock_class; + + lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); + } + __skb_queue_head_init(&txq->overflow_q); /* diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 431f13b4faf6..d3bad5779376 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, data->bcn_delta = do_div(delta, bcn_int); } else { data->tsf_offset -= delta; - data->bcn_delta = -do_div(delta, bcn_int); + data->bcn_delta = -(s64)do_div(delta, bcn_int); } } diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 
39ce76ad00bc..16241d21727b 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2222,8 +2222,9 @@ done: is_scanning_required = 1; } else { mwifiex_dbg(priv->adapter, MSG, - "info: trying to associate to '%s' bssid %pM\n", - (char *)req_ssid.ssid, bss->bssid); + "info: trying to associate to '%.*s' bssid %pM\n", + req_ssid.ssid_len, (char *)req_ssid.ssid, + bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } @@ -2283,8 +2284,8 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(adapter, INFO, - "info: Trying to associate to %s and bssid %pM\n", - (char *)sme->ssid, sme->bssid); + "info: Trying to associate to %.*s and bssid %pM\n", + (int)sme->ssid_len, (char *)sme->ssid, sme->bssid); if (!mwifiex_stop_bg_scan(priv)) cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); @@ -2417,8 +2418,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, } mwifiex_dbg(priv->adapter, MSG, - "info: trying to join to %s and bssid %pM\n", - (char *)params->ssid, params->bssid); + "info: trying to join to %.*s and bssid %pM\n", + params->ssid_len, (char *)params->ssid, params->bssid); mwifiex_set_ibss_params(priv, params); diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e17879dd5d5a..bf2744e1e3db 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) queue->rx_skbs[id] = skb; ref = gnttab_claim_grant_reference(&queue->gref_rx_head); - BUG_ON((signed short)ref < 0); + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); queue->grant_rx_ref[id] = ref; page = skb_frag_page(&skb_shinfo(skb)->frags[0]); @@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); ref = gnttab_claim_grant_reference(&queue->gref_tx_head); - BUG_ON((signed short)ref < 0); + WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gfn, GNTMAP_readonly); diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 0d5c29ae51de..7310a261c858 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, module_param_named(xeon_b2b_usd_bar4_addr64, xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, "XEON B2B USD BAR 4 64-bit address"); module_param_named(xeon_b2b_usd_bar4_addr32, xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, "XEON B2B USD split-BAR 4 32-bit address"); module_param_named(xeon_b2b_usd_bar5_addr32, xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, "XEON B2B USD split-BAR 5 32-bit address"); module_param_named(xeon_b2b_dsd_bar2_addr64, @@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, module_param_named(xeon_b2b_dsd_bar4_addr64, xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, "XEON B2B DSD BAR 4 64-bit address"); module_param_named(xeon_b2b_dsd_bar4_addr32, 
xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, "XEON B2B DSD split-BAR 4 32-bit address"); module_param_named(xeon_b2b_dsd_bar5_addr32, xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); -MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, +MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, "XEON B2B DSD split-BAR 5 32-bit address"); #ifndef ioread64 @@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, XEON_B2B_MIN_SIZE); if (!ndev->peer_mmio) return -EIO; + + ndev->peer_addr = pci_resource_start(pdev, b2b_bar); } return 0; @@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) goto err_mmio; } ndev->peer_mmio = ndev->self_mmio; + ndev->peer_addr = pci_resource_start(pdev, 0); return 0; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 8601c10acf74..4eb8adb34508 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -257,7 +257,7 @@ enum { #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 #define DMA_RETRIES 20 -#define DMA_OUT_RESOURCE_TO 50 +#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) static void ntb_transport_rxc_db(unsigned long data); static const struct ntb_ctx_ops ntb_transport_ops; diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 6a50f20bf1cd..e75d4fdc0866 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -72,7 +72,7 @@ #define MAX_THREADS 32 #define MAX_TEST_SIZE SZ_1M #define MAX_SRCS 32 -#define DMA_OUT_RESOURCE_TO 50 +#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) #define DMA_RETRIES 20 #define SZ_4G (1ULL << 32) #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ @@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, return -ENOMEM; if (mutex_is_locked(&perf->run_mutex)) { - out_off = snprintf(buf, 64, "running\n"); + out_off = scnprintf(buf, 64, "running\n"); goto read_from_buf; } @@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, break; if (pctx->status) { - out_off += snprintf(buf + out_off, 1024 - out_off, + out_off += scnprintf(buf + out_off, 1024 - out_off, "%d: error %d\n", i, pctx->status); continue; } rate = div64_u64(pctx->copied, pctx->diff_us); - out_off += snprintf(buf + out_off, 1024 - out_off, + out_off += scnprintf(buf + out_off, 1024 - out_off, "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", i, pctx->copied, pctx->diff_us, rate); } diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 7d311799fca1..435861189d97 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer"); static unsigned long db_init = 0x7; module_param(db_init, ulong, 0644); -MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); +MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); struct pp_ctx { struct ntb_dev *ntb; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0248d0e21fee..5e52034ab010 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) result = nvme_enable_ctrl(&dev->ctrl, cap); if (result) - goto free_nvmeq; + return result; nvmeq->cq_vector = 0; result = queue_request_irq(nvmeq); if (result) { nvmeq->cq_vector = -1; - goto 
free_nvmeq; + return result; } return result; - - free_nvmeq: - nvme_free_queues(dev, 0); - return result; } static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) @@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev) max = min(dev->max_qid, dev->queue_count - 1); for (i = dev->online_queues; i <= max; i++) { ret = nvme_create_queue(dev->queues[i], i); - if (ret) { - nvme_free_queues(dev, i); + if (ret) break; - } } /* @@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = queue_request_irq(adminq); if (result) { adminq->cq_vector = -1; - goto free_queues; + return result; } return nvme_create_io_queues(dev); - - free_queues: - nvme_free_queues(dev, 1); - return result; } static void nvme_del_queue_end(struct request *req, int error) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5a8388177959..3d25add36d91 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags { NVME_RDMA_Q_CONNECTED = (1 << 0), NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), NVME_RDMA_Q_DELETING = (1 << 2), + NVME_RDMA_Q_LIVE = (1 << 3), }; struct nvme_rdma_queue { @@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) for (i = 1; i < ctrl->queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); - if (ret) - break; + if (ret) { + dev_info(ctrl->ctrl.device, + "failed to connect i/o queue: %d\n", ret); + goto out_free_queues; + } + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); } + return 0; + +out_free_queues: + nvme_rdma_free_io_queues(ctrl); return ret; } @@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) if (ret) goto stop_admin_q; + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); + ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); if (ret) goto stop_admin_q; @@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) nvme_stop_keep_alive(&ctrl->ctrl); - for (i = 0; i < ctrl->queue_count; i++) + for (i = 0; i < ctrl->queue_count; i++) { clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); + clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); + } if (ctrl->queue_count > 1) nvme_stop_queues(&ctrl->ctrl); @@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved) return BLK_EH_HANDLED; } +/* + * We cannot accept any other command until the Connect command has completed. 
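Illustrative aside (not part of the diff): the NVME_RDMA_Q_LIVE flag added in this hunk gates each queue so that, until the fabrics Connect command has completed, every other request is pushed back to the block layer as busy. A stand-alone model of that admission rule (the opcode values are invented):

	/* User-space model of the "queue not live yet" gate. */
	#include <stdbool.h>
	#include <stdio.h>

	enum { OP_CONNECT = 1, OP_READ = 2, OP_WRITE = 3 };

	struct queue { bool live; };

	static bool queue_accepts(const struct queue *q, int opcode)
	{
		if (!q->live)
			return opcode == OP_CONNECT;	/* only Connect may pass before live */
		return true;
	}

	int main(void)
	{
		struct queue q = { .live = false };

		printf("read before connect: %d\n", queue_accepts(&q, OP_READ));	/* 0 -> requeue */
		q.live = true;
		printf("write after connect: %d\n", queue_accepts(&q, OP_WRITE));	/* 1 */
		return 0;
	}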
+ */ +static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, + struct request *rq) +{ + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { + struct nvme_command *cmd = (struct nvme_command *)rq->cmd; + + if (rq->cmd_type != REQ_TYPE_DRV_PRIV || + cmd->common.opcode != nvme_fabrics_command || + cmd->fabrics.fctype != nvme_fabrics_type_connect) + return false; + } + + return true; +} + static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, WARN_ON_ONCE(rq->tag < 0); + if (!nvme_rdma_queue_is_ready(queue, rq)) + return BLK_MQ_RQ_QUEUE_BUSY; + dev = queue->device->dev; ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command), DMA_TO_DEVICE); @@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) if (error) goto out_cleanup_queue; + set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); if (error) { dev_err(ctrl->ctrl.device, diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b4cacb6f0258..a21437a33adb 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work) void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { - ctrl->csts |= NVME_CSTS_CFS; - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); - schedule_work(&ctrl->fatal_err_work); + mutex_lock(&ctrl->lock); + if (!(ctrl->csts & NVME_CSTS_CFS)) { + ctrl->csts |= NVME_CSTS_CFS; + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); + schedule_work(&ctrl->fatal_err_work); + } + mutex_unlock(&ctrl->lock); } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index f8d23999e0f2..005ef5d17a19 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -951,6 +951,7 @@ err_destroy_cq: static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) { + ib_drain_qp(queue->cm_id->qp); rdma_destroy_qp(queue->cm_id); ib_free_cq(queue->cq); } @@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, spin_lock_init(&queue->rsp_wr_wait_lock); INIT_LIST_HEAD(&queue->free_rsps); spin_lock_init(&queue->rsps_lock); + INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); if (queue->idx < 0) { @@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) if (disconnect) { rdma_disconnect(queue->cm_id); - ib_drain_qp(queue->cm_id->qp); schedule_work(&queue->release_work); } } @@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, { WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); - pr_err("failed to connect queue\n"); + mutex_lock(&nvmet_rdma_queue_mutex); + if (!list_empty(&queue->queue_list)) + list_del_init(&queue->queue_list); + mutex_unlock(&nvmet_rdma_queue_mutex); + + pr_err("failed to connect queue %d\n", queue->idx); schedule_work(&queue->release_work); } @@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: - nvmet_rdma_queue_disconnect(queue); + /* + * We might end up here when we already freed the qp + * which means queue release sequence is in progress, + * so don't get in the way... 
+ */ + if (queue) + nvmet_rdma_queue_disconnect(queue); break; case RDMA_CM_EVENT_DEVICE_REMOVAL: ret = nvmet_rdma_device_removal(cm_id, queue); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index b470f7e3521d..262281bd68fa 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np) mdiodev = to_mdio_device(d); if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) return to_phy_device(d); + put_device(d); } return NULL; @@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np) status.link = 1; status.duplex = of_property_read_bool(fixed_link_node, "full-duplex"); - if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) + if (of_property_read_u32(fixed_link_node, "speed", + &status.speed)) { + of_node_put(fixed_link_node); return -EINVAL; + } status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); @@ -486,3 +490,18 @@ int of_phy_register_fixed_link(struct device_node *np) return -ENODEV; } EXPORT_SYMBOL(of_phy_register_fixed_link); + +void of_phy_deregister_fixed_link(struct device_node *np) +{ + struct phy_device *phydev; + + phydev = of_phy_find_device(np); + if (!phydev) + return; + + fixed_phy_unregister(phydev); + + put_device(&phydev->mdio.dev); /* of_phy_find_device() */ + phy_device_free(phydev); /* fixed_phy_register() */ +} +EXPORT_SYMBOL(of_phy_deregister_fixed_link); diff --git a/drivers/pci/host/pcie-designware-plat.c b/drivers/pci/host/pcie-designware-plat.c index 8df6312ed300..1a02038c4640 100644 --- a/drivers/pci/host/pcie-designware-plat.c +++ b/drivers/pci/host/pcie-designware-plat.c @@ -3,7 +3,7 @@ * * Copyright (C) 2015-2016 Synopsys, Inc. 
(www.synopsys.com) * - * Authors: Joao Pinto + * Authors: Joao Pinto * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 55f453de562e..c7f3408e3148 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c @@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) return intel_mid_pci_set_power_state(pdev, state); } +static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) +{ + return intel_mid_pci_get_power_state(pdev); +} + static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) { return PCI_D3hot; @@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev) static struct pci_platform_pm_ops mid_pci_platform_pm = { .is_manageable = mid_pci_power_manageable, .set_state = mid_pci_set_power_state, + .get_state = mid_pci_get_power_state, .choose_state = mid_pci_choose_state, .sleep_wake = mid_pci_sleep_wake, .run_wake = mid_pci_run_wake, diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index db553dc22c8e..2b6a59266689 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c @@ -307,20 +307,6 @@ out: return 0; } -static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) -{ - while (1) { - if (!pci_is_pcie(dev)) - break; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - return dev; - if (!dev->bus->self) - break; - dev = dev->bus->self; - } - return NULL; -} - static int find_aer_device_iter(struct device *device, void *data) { struct pcie_device **result = data; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ab002671fa60..104c46d53121 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -1439,6 +1439,21 @@ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp) dev_warn(&dev->dev, "PCI-X settings not supported\n"); } +static bool pcie_root_rcb_set(struct pci_dev *dev) +{ + struct pci_dev *rp = pcie_find_root_port(dev); + u16 lnkctl; + + if (!rp) + return false; + + pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); + if (lnkctl & PCI_EXP_LNKCTL_RCB) + return true; + + return false; +} + static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) { int pos; @@ -1468,9 +1483,20 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); /* Initialize Link Control Register */ - if (pcie_cap_has_lnkctl(dev)) + if (pcie_cap_has_lnkctl(dev)) { + + /* + * If the Root Port supports Read Completion Boundary of + * 128, set RCB to 128. Otherwise, clear it. 
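Illustrative aside (not part of the diff): the pcie_root_rcb_set() helper added above lets program_hpp_type2() make a device's Read Completion Boundary mirror its Root Port — RCB=128 only when the Root Port already advertises it, cleared otherwise. A stand-alone model of that rule, with the Link Control register reduced to the single RCB bit:

	/* User-space model of the RCB rule; register layout simplified to one bit. */
	#include <stdint.h>
	#include <stdio.h>

	#define LNKCTL_RCB (1u << 3)	/* PCI_EXP_LNKCTL_RCB */

	static uint16_t apply_rcb_rule(uint16_t child_lnkctl, uint16_t root_lnkctl)
	{
		child_lnkctl &= ~LNKCTL_RCB;		/* default: clear RCB */
		if (root_lnkctl & LNKCTL_RCB)
			child_lnkctl |= LNKCTL_RCB;	/* mirror the Root Port */
		return child_lnkctl;
	}

	int main(void)
	{
		printf("root without RCB -> child 0x%x\n", apply_rcb_rule(LNKCTL_RCB, 0));
		printf("root with RCB    -> child 0x%x\n", apply_rcb_rule(0, LNKCTL_RCB));
		return 0;
	}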
+ */ + hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; + hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; + if (pcie_root_rcb_set(dev)) + hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; + pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); + } /* Find Advanced Error Reporting Enhanced Capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 87e6334eab93..547ca7b3f098 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c @@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy) struct twl4030_usb *twl = phy_get_drvdata(phy); dev_dbg(twl->dev, "%s\n", __func__); - pm_runtime_mark_last_busy(twl->dev); - pm_runtime_put_autosuspend(twl->dev); return 0; } @@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy) dev_dbg(twl->dev, "%s\n", __func__); pm_runtime_get_sync(twl->dev); schedule_delayed_work(&twl->id_workaround_work, HZ); + pm_runtime_mark_last_busy(twl->dev); + pm_runtime_put_autosuspend(twl->dev); return 0; } diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index 381871b2bb46..9d5bd7d5c610 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -474,6 +474,7 @@ static int meson_pwm_probe(struct platform_device *pdev) if (IS_ERR(meson->base)) return PTR_ERR(meson->base); + spin_lock_init(&meson->lock); meson->chip.dev = &pdev->dev; meson->chip.ops = &meson_pwm_ops; meson->chip.base = -1; diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 0296d8178ae2..a813239300c3 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -425,6 +425,8 @@ void pwmchip_sysfs_unexport_children(struct pwm_chip *chip) if (test_bit(PWMF_EXPORTED, &pwm->flags)) pwm_unexport_child(parent, pwm); } + + put_device(parent); } static int __init pwm_sysfs_init(void) diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 18a93d3e3f93..d36534965635 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c @@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = { { .compatible = "alphascale,asm9260-rtc", }, {} }; +MODULE_DEVICE_TABLE(of, asm9260_dt_ids); static struct platform_driver asm9260_rtc_driver = { .probe = asm9260_rtc_probe, diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index dd3d59806ffa..7030d7cd3861 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq) spin_unlock_irq(&rtc_lock); } -static void __exit cmos_do_remove(struct device *dev) +static void cmos_do_remove(struct device *dev) { struct cmos_rtc *cmos = dev_get_drvdata(dev); struct resource *ports; @@ -996,8 +996,9 @@ static u32 rtc_handler(void *context) struct cmos_rtc *cmos = dev_get_drvdata(dev); unsigned char rtc_control = 0; unsigned char rtc_intr; + unsigned long flags; - spin_lock_irq(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); if (cmos_rtc.suspend_ctrl) rtc_control = CMOS_READ(RTC_CONTROL); if (rtc_control & RTC_AIE) { @@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context) rtc_intr = CMOS_READ(RTC_INTR_FLAGS); rtc_update_irq(cmos->rtc, 1, rtc_intr); } - spin_unlock_irq(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); pm_wakeup_event(dev, 0); acpi_clear_event(ACPI_EVENT_RTC); @@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) pnp_irq(pnp, 0)); } -static void __exit cmos_pnp_remove(struct pnp_dev *pnp) +static void 
cmos_pnp_remove(struct pnp_dev *pnp) { cmos_do_remove(&pnp->dev); } @@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = { .name = (char *) driver_name, .id_table = rtc_ids, .probe = cmos_pnp_probe, - .remove = __exit_p(cmos_pnp_remove), + .remove = cmos_pnp_remove, .shutdown = cmos_pnp_shutdown, /* flag ensures resume() gets called, and stops syslog spam */ @@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) return cmos_do_probe(&pdev->dev, resource, irq); } -static int __exit cmos_platform_remove(struct platform_device *pdev) +static int cmos_platform_remove(struct platform_device *pdev) { cmos_do_remove(&pdev->dev); return 0; @@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev) MODULE_ALIAS("platform:rtc_cmos"); static struct platform_driver cmos_platform_driver = { - .remove = __exit_p(cmos_platform_remove), + .remove = cmos_platform_remove, .shutdown = cmos_platform_shutdown, .driver = { .name = driver_name, diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index b04ea9b5ae67..51e52446eacb 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -113,6 +113,7 @@ /* OMAP_RTC_OSC_REG bit fields: */ #define OMAP_RTC_OSC_32KCLK_EN BIT(6) #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) +#define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) /* OMAP_RTC_IRQWAKEEN bit fields: */ #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) @@ -146,6 +147,7 @@ struct omap_rtc { u8 interrupts_reg; bool is_pmic_controller; bool has_ext_clk; + bool is_suspending; const struct omap_rtc_device_type *type; struct pinctrl_dev *pctldev; }; @@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev) */ if (rtc->has_ext_clk) { reg = rtc_read(rtc, OMAP_RTC_OSC_REG); - rtc_write(rtc, OMAP_RTC_OSC_REG, - reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); + reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; + reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; + rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); } rtc->type->lock(rtc); @@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev) rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); rtc->type->lock(rtc); - /* Disable the clock/module */ - pm_runtime_put_sync(dev); + rtc->is_suspending = true; return 0; } @@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev) { struct omap_rtc *rtc = dev_get_drvdata(dev); - /* Enable the clock/module so that we can access the registers */ - pm_runtime_get_sync(dev); - rtc->type->unlock(rtc); if (device_may_wakeup(dev)) disable_irq_wake(rtc->irq_alarm); @@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev) rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); rtc->type->lock(rtc); + rtc->is_suspending = false; + return 0; } #endif -static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); +#ifdef CONFIG_PM +static int omap_rtc_runtime_suspend(struct device *dev) +{ + struct omap_rtc *rtc = dev_get_drvdata(dev); + + if (rtc->is_suspending && !rtc->has_ext_clk) + return -EBUSY; + + return 0; +} + +static int omap_rtc_runtime_resume(struct device *dev) +{ + return 0; +} +#endif + +static const struct dev_pm_ops omap_rtc_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) + SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, + omap_rtc_runtime_resume, NULL) +}; static void omap_rtc_shutdown(struct platform_device *pdev) { diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index aebc4ddb3060..ac05317bba7f 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ 
b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1083,7 +1083,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) nonemb_cmd = &phba->boot_struct.nonemb_cmd; nonemb_cmd->size = sizeof(*resp); nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev, - sizeof(nonemb_cmd->size), + nonemb_cmd->size, &nonemb_cmd->dma); if (!nonemb_cmd->va) { mutex_unlock(&ctrl->mbox_lock); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index d007ec18179a..a1d6ab76a514 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -2009,7 +2009,7 @@ static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, static int hpsa_slave_alloc(struct scsi_device *sdev) { - struct hpsa_scsi_dev_t *sd; + struct hpsa_scsi_dev_t *sd = NULL; unsigned long flags; struct ctlr_info *h; @@ -2026,7 +2026,8 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) sd->target = sdev_id(sdev); sd->lun = sdev->lun; } - } else + } + if (!sd) sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), sdev_id(sdev), sdev->lun); @@ -3840,6 +3841,7 @@ static int hpsa_update_device_info(struct ctlr_info *h, sizeof(this_device->vendor)); memcpy(this_device->model, &inq_buff[16], sizeof(this_device->model)); + this_device->rev = inq_buff[2]; memset(this_device->device_id, 0, sizeof(this_device->device_id)); if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, @@ -3929,10 +3931,14 @@ static void figure_bus_target_lun(struct ctlr_info *h, if (!is_logical_dev_addr_mode(lunaddrbytes)) { /* physical device, target and lun filled in later */ - if (is_hba_lunid(lunaddrbytes)) + if (is_hba_lunid(lunaddrbytes)) { + int bus = HPSA_HBA_BUS; + + if (!device->rev) + bus = HPSA_LEGACY_HBA_BUS; hpsa_set_bus_target_lun(device, - HPSA_HBA_BUS, 0, lunid & 0x3fff); - else + bus, 0, lunid & 0x3fff); + } else /* defer target, lun assignment for physical devices */ hpsa_set_bus_target_lun(device, HPSA_PHYSICAL_DEVICE_BUS, -1, -1); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 82cdfad874f3..9ea162de80dc 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -69,6 +69,7 @@ struct hpsa_scsi_dev_t { u64 sas_address; unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ unsigned char model[16]; /* bytes 16-31 of inquiry data */ + unsigned char rev; /* byte 2 of inquiry data */ unsigned char raid_level; /* from inquiry page 0xC1 */ unsigned char volume_offline; /* discovered via TUR or VPD */ u16 queue_depth; /* max queue_depth for this device */ @@ -402,6 +403,7 @@ struct offline_device_entry { #define HPSA_RAID_VOLUME_BUS 1 #define HPSA_EXTERNAL_RAID_VOLUME_BUS 2 #define HPSA_HBA_BUS 0 +#define HPSA_LEGACY_HBA_BUS 3 /* Send the command to the hardware diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 04ce7cfb6d1b..50c71678a156 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -308,7 +308,7 @@ struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost) fc_stats = &lport->host_stats; memset(fc_stats, 0, sizeof(struct fc_host_statistics)); - fc_stats->seconds_since_last_reset = (lport->boot_time - jiffies) / HZ; + fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ; for_each_possible_cpu(cpu) { struct fc_stats *stats; diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8aa769a2d919..1c4744e78173 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3885,6 +3885,11 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, } } +static inline bool 
ata_12_16_cmd(struct scsi_cmnd *scmd) +{ + return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); +} + /** * _scsih_flush_running_cmds - completing outstanding commands. * @ioc: per adapter object @@ -3906,6 +3911,9 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) if (!scmd) continue; count++; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, + SDEV_RUNNING); mpt3sas_base_free_smid(ioc, smid); scsi_dma_unmap(scmd); if (ioc->pci_error_recovery) @@ -4010,8 +4018,6 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) SAM_STAT_CHECK_CONDITION; } - - /** * scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object @@ -4038,6 +4044,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (ioc->logging_level & MPT_DEBUG_SCSI) scsi_print_command(scmd); + /* + * Lock the device for any subsequent command until command is + * done. + */ + if (ata_12_16_cmd(scmd)) + scsi_internal_device_block(scmd->device); + sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { scmd->result = DID_NO_CONNECT << 16; @@ -4613,6 +4626,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) if (scmd == NULL) return 1; + if (ata_12_16_cmd(scmd)) + scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); if (mpi_reply == NULL) { diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 86eb19902bac..c7cc8035eacb 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -791,8 +791,10 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf slot->slot_tag = tag; slot->buf = pci_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); - if (!slot->buf) + if (!slot->buf) { + rc = -ENOMEM; goto err_out_tag; + } memset(slot->buf, 0, MVS_SLOT_BUF_SZ); tei.task = task; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 567fa080e261..56d6142852a5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - /* Get a reference to the sp and drop the lock. - * The reference ensures this sp->done() call - * - and not the call in qla2xxx_eh_abort() - - * ends the SCSI command (with result 'res'). + /* Don't abort commands in adapter during EEH + * recovery as it's not accessible/responding. */ - sp_get(sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - qla2xxx_eh_abort(GET_CMD_SP(sp)); - spin_lock_irqsave(&ha->hardware_lock, flags); + if (!ha->flags.eeh_busy) { + /* Get a reference to the sp and drop the lock. + * The reference ensures this sp->done() call + * - and not the call in qla2xxx_eh_abort() - + * ends the SCSI command (with result 'res'). + */ + sp_get(sp); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + qla2xxx_eh_abort(GET_CMD_SP(sp)); + spin_lock_irqsave(&ha->hardware_lock, flags); + } req->outstanding_cmds[cnt] = NULL; sp->done(vha, sp, res); } diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h index 4377e87ee79c..892a0b058b99 100644 --- a/drivers/scsi/qlogicpti.h +++ b/drivers/scsi/qlogicpti.h @@ -356,8 +356,8 @@ struct qlogicpti { /* The rest of the elements are unimportant for performance. 
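Illustrative aside (not part of the diff): the qlogicpti change just below widens res_dvma/req_dvma from __u32 to dma_addr_t, presumably so the stored handles cannot be silently truncated on configurations where dma_addr_t is 64 bits wide. A stand-alone demonstration of that truncation (the address value is invented):

	/* User-space demo: storing a 64-bit DMA handle in a 32-bit field loses bits. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dma_handle = 0x0000000123400000ULL;	/* example address */
		uint32_t truncated  = (uint32_t)dma_handle;

		printf("full handle: 0x%016llx\n", (unsigned long long)dma_handle);
		printf("as __u32:    0x%08x (upper bits lost)\n", truncated);
		return 0;
	}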
*/ struct qlogicpti *next; - __u32 res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ - __u32 req_dvma; /* Ptr to REQUEST bufs (DVMA) */ + dma_addr_t res_dvma; /* Ptr to RESPONSE bufs (DVMA)*/ + dma_addr_t req_dvma; /* Ptr to REQUEST bufs (DVMA) */ u_char fware_majrev, fware_minrev, fware_micrev; struct Scsi_Host *qhost; int qpti_id; diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 7a223074df3d..afada655f861 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { .set_cur_state = powerclamp_set_cur_state, }; +static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); + static int __init powerclamp_probe(void) { - if (!boot_cpu_has(X86_FEATURE_MWAIT)) { + + if (!x86_match_cpu(intel_powerclamp_ids)) { pr_err("CPU does not support MWAIT"); return -ENODEV; } diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 69426e644d17..3dbb4a21ab44 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) if (!ci) return -ENOMEM; + spin_lock_init(&ci->lock); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 661f43fe0f9e..c9e80ad48fdc 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci) struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; int retval = 0; - spin_lock_init(&ci->lock); - ci->gadget.ops = &usb_gadget_ops; ci->gadget.speed = USB_SPEED_UNKNOWN; ci->gadget.max_speed = USB_SPEED_HIGH; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index e40d47d47d82..17989b72cdae 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f, switch (creq->bRequestType & USB_RECIP_MASK) { case USB_RECIP_INTERFACE: - return ffs_func_revmap_intf(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_intf(func, + le16_to_cpu(creq->wIndex)) >= 0); case USB_RECIP_ENDPOINT: - return ffs_func_revmap_ep(func, - le16_to_cpu(creq->wIndex) >= 0); + return (ffs_func_revmap_ep(func, + le16_to_cpu(creq->wIndex)) >= 0); default: return (bool) (func->ffs->user_flags & FUNCTIONFS_ALL_CTRL_RECIP); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index e01116e4c067..c3e172e15ec3 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -986,7 +986,7 @@ b_host: } #endif - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); return handled; } @@ -1855,14 +1855,23 @@ static void musb_pm_runtime_check_session(struct musb *musb) MUSB_DEVCTL_HR; switch (devctl & ~s) { case MUSB_QUIRK_B_INVALID_VBUS_91: - if (!musb->session && !musb->quirk_invalid_vbus) { - musb->quirk_invalid_vbus = true; + if (musb->quirk_retries--) { musb_dbg(musb, - "First invalid vbus, assume no session"); + "Poll devctl on invalid vbus, assume no session"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + return; } - break; case MUSB_QUIRK_A_DISCONNECT_19: + if (musb->quirk_retries--) { + musb_dbg(musb, + 
"Poll devctl on possible host mode disconnect"); + schedule_delayed_work(&musb->irq_work, + msecs_to_jiffies(1000)); + + return; + } if (!musb->session) break; musb_dbg(musb, "Allow PM on possible host mode disconnect"); @@ -1886,9 +1895,9 @@ static void musb_pm_runtime_check_session(struct musb *musb) if (error < 0) dev_err(musb->controller, "Could not enable: %i\n", error); + musb->quirk_retries = 3; } else { musb_dbg(musb, "Allow PM with no session: %02x", devctl); - musb->quirk_invalid_vbus = false; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); } @@ -1899,7 +1908,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) /* Only used to provide driver mode change events */ static void musb_irq_work(struct work_struct *data) { - struct musb *musb = container_of(data, struct musb, irq_work); + struct musb *musb = container_of(data, struct musb, irq_work.work); musb_pm_runtime_check_session(musb); @@ -1969,6 +1978,7 @@ static struct musb *allocate_instance(struct device *dev, INIT_LIST_HEAD(&musb->control); INIT_LIST_HEAD(&musb->in_bulk); INIT_LIST_HEAD(&musb->out_bulk); + INIT_LIST_HEAD(&musb->pending_list); musb->vbuserr_retry = VBUSERR_RETRY_COUNT; musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; @@ -2018,6 +2028,84 @@ static void musb_free(struct musb *musb) musb_host_free(musb); } +struct musb_pending_work { + int (*callback)(struct musb *musb, void *data); + void *data; + struct list_head node; +}; + +/* + * Called from musb_runtime_resume(), musb_resume(), and + * musb_queue_resume_work(). Callers must take musb->lock. + */ +static int musb_run_resume_work(struct musb *musb) +{ + struct musb_pending_work *w, *_w; + unsigned long flags; + int error = 0; + + spin_lock_irqsave(&musb->list_lock, flags); + list_for_each_entry_safe(w, _w, &musb->pending_list, node) { + if (w->callback) { + error = w->callback(musb, w->data); + if (error < 0) { + dev_err(musb->controller, + "resume callback %p failed: %i\n", + w->callback, error); + } + } + list_del(&w->node); + devm_kfree(musb->controller, w); + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} + +/* + * Called to run work if device is active or else queue the work to happen + * on resume. Caller must take musb->lock and must hold an RPM reference. + * + * Note that we cowardly refuse queuing work after musb PM runtime + * resume is done calling musb_run_resume_work() and return -EINPROGRESS + * instead. 
+ */ +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data) +{ + struct musb_pending_work *w; + unsigned long flags; + int error; + + if (WARN_ON(!callback)) + return -EINVAL; + + if (pm_runtime_active(musb->controller)) + return callback(musb, data); + + w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); + if (!w) + return -ENOMEM; + + w->callback = callback; + w->data = data; + spin_lock_irqsave(&musb->list_lock, flags); + if (musb->is_runtime_suspended) { + list_add_tail(&w->node, &musb->pending_list); + error = 0; + } else { + dev_err(musb->controller, "could not add resume work %p\n", + callback); + devm_kfree(musb->controller, w); + error = -EINPROGRESS; + } + spin_unlock_irqrestore(&musb->list_lock, flags); + + return error; +} +EXPORT_SYMBOL_GPL(musb_queue_resume_work); + static void musb_deassert_reset(struct work_struct *work) { struct musb *musb; @@ -2065,6 +2153,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) } spin_lock_init(&musb->lock); + spin_lock_init(&musb->list_lock); musb->board_set_power = plat->set_power; musb->min_power = plat->min_power; musb->ops = plat->platform_ops; @@ -2208,7 +2297,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) musb_generic_disable(musb); /* Init IRQ workqueue before request_irq */ - INIT_WORK(&musb->irq_work, musb_irq_work); + INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); @@ -2291,6 +2380,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status) goto fail5; + musb->is_initialized = 1; pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); @@ -2304,7 +2394,7 @@ fail4: musb_host_cleanup(musb); fail3: - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); if (musb->dma_controller) @@ -2371,7 +2461,7 @@ static int musb_remove(struct platform_device *pdev) */ musb_exit_debugfs(musb); - cancel_work_sync(&musb->irq_work); + cancel_delayed_work_sync(&musb->irq_work); cancel_delayed_work_sync(&musb->finish_resume_work); cancel_delayed_work_sync(&musb->deassert_reset_work); pm_runtime_get_sync(musb->controller); @@ -2557,6 +2647,7 @@ static int musb_suspend(struct device *dev) musb_platform_disable(musb); musb_generic_disable(musb); + WARN_ON(!list_empty(&musb->pending_list)); spin_lock_irqsave(&musb->lock, flags); @@ -2578,9 +2669,11 @@ static int musb_suspend(struct device *dev) static int musb_resume(struct device *dev) { - struct musb *musb = dev_to_musb(dev); - u8 devctl; - u8 mask; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; + u8 devctl; + u8 mask; /* * For static cmos like DaVinci, register values were preserved @@ -2614,6 +2707,13 @@ static int musb_resume(struct device *dev) musb_start(musb); + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } @@ -2622,14 +2722,16 @@ static int musb_runtime_suspend(struct device *dev) struct musb *musb = dev_to_musb(dev); musb_save_context(musb); + musb->is_runtime_suspended = 1; return 0; } static int musb_runtime_resume(struct device *dev) { - struct musb 
*musb = dev_to_musb(dev); - static int first = 1; + struct musb *musb = dev_to_musb(dev); + unsigned long flags; + int error; /* * When pm_runtime_get_sync called for the first time in driver @@ -2640,9 +2742,10 @@ static int musb_runtime_resume(struct device *dev) * Also context restore without save does not make * any sense */ - if (!first) - musb_restore_context(musb); - first = 0; + if (!musb->is_initialized) + return 0; + + musb_restore_context(musb); if (musb->need_finish_resume) { musb->need_finish_resume = 0; @@ -2650,6 +2753,14 @@ static int musb_runtime_resume(struct device *dev) msecs_to_jiffies(USB_RESUME_TIMEOUT)); } + spin_lock_irqsave(&musb->lock, flags); + error = musb_run_resume_work(musb); + if (error) + dev_err(musb->controller, "resume work failed with %i\n", + error); + musb->is_runtime_suspended = 0; + spin_unlock_irqrestore(&musb->lock, flags); + return 0; } diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 2cb88a498f8a..91817d77d59c 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -303,13 +303,14 @@ struct musb_context_registers { struct musb { /* device lock */ spinlock_t lock; + spinlock_t list_lock; /* resume work list lock */ struct musb_io io; const struct musb_platform_ops *ops; struct musb_context_registers context; irqreturn_t (*isr)(int, void *); - struct work_struct irq_work; + struct delayed_work irq_work; struct delayed_work deassert_reset_work; struct delayed_work finish_resume_work; struct delayed_work gadget_work; @@ -337,6 +338,7 @@ struct musb { struct list_head control; /* of musb_qh */ struct list_head in_bulk; /* of musb_qh */ struct list_head out_bulk; /* of musb_qh */ + struct list_head pending_list; /* pending work list */ struct timer_list otg_timer; struct notifier_block nb; @@ -379,12 +381,15 @@ struct musb { int port_mode; /* MUSB_PORT_MODE_* */ bool session; - bool quirk_invalid_vbus; + unsigned long quirk_retries; bool is_host; int a_wait_bcon; /* VBUS timeout in msecs */ unsigned long idle_timeout; /* Next timeout in jiffies */ + unsigned is_initialized:1; + unsigned is_runtime_suspended:1; + /* active means connected and not suspended */ unsigned is_active:1; @@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *); extern void musb_hnp_stop(struct musb *musb); +int musb_queue_resume_work(struct musb *musb, + int (*callback)(struct musb *musb, void *data), + void *data); + static inline void musb_platform_set_vbus(struct musb *musb, int is_on) { if (musb->ops->set_vbus) diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 0f17d2140db6..feae1561b9ab 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c @@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb) musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); musb_writel(reg_base, wrp->epintr_clear, wrp->txep_bitmap | wrp->rxep_bitmap); + del_timer_sync(&glue->timer); musb_writeb(musb->mregs, MUSB_DEVCTL, 0); } -static void otg_timer(unsigned long _musb) +/* Caller must take musb->lock */ +static int dsps_check_status(struct musb *musb, void *unused) { - struct musb *musb = (void *)_musb; void __iomem *mregs = musb->mregs; struct device *dev = musb->controller; struct dsps_glue *glue = dev_get_drvdata(dev->parent); const struct dsps_musb_wrapper *wrp = glue->wrp; u8 devctl; - unsigned long flags; int skip_session = 0; - int err; - - err = pm_runtime_get_sync(dev); - if (err < 0) - dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); /* * 
We poll because DSPS IP's won't expose several OTG-critical @@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb) dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, usb_otg_state_string(musb->xceiv->otg->state)); - spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->otg->state) { case OTG_STATE_A_WAIT_VRISE: mod_timer(&glue->timer, jiffies + @@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb) default: break; } - spin_unlock_irqrestore(&musb->lock, flags); + return 0; +} + +static void otg_timer(unsigned long _musb) +{ + struct musb *musb = (void *)_musb; + struct device *dev = musb->controller; + unsigned long flags; + int err; + + err = pm_runtime_get(dev); + if ((err != -EINPROGRESS) && err < 0) { + dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); + pm_runtime_put_noidle(dev); + + return; + } + + spin_lock_irqsave(&musb->lock, flags); + err = musb_queue_resume_work(musb, dsps_check_status, NULL); + if (err < 0) + dev_err(dev, "%s resume work: %i\n", __func__, err); + spin_unlock_irqrestore(&musb->lock, flags); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev) platform_set_drvdata(pdev, glue); pm_runtime_enable(&pdev->dev); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, 200); - - ret = pm_runtime_get_sync(&pdev->dev); - if (ret < 0) { - dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); - goto err2; - } - ret = dsps_create_musb_pdev(glue, pdev); if (ret) - goto err3; - - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); + goto err; return 0; -err3: - pm_runtime_put_sync(&pdev->dev); -err2: - pm_runtime_dont_use_autosuspend(&pdev->dev); +err: pm_runtime_disable(&pdev->dev); return ret; } @@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev) platform_device_unregister(glue->musb); - /* disable usbss clocks */ - pm_runtime_dont_use_autosuspend(&pdev->dev); - pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 4042ea017985..a55173c9e564 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep, musb_ep->dma ? 
"dma, " : "", musb_ep->packet_sz); - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); fail: spin_unlock_irqrestore(&musb->lock, flags); @@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep) musb_ep->desc = NULL; musb_ep->end_point.desc = NULL; - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); spin_unlock_irqrestore(&(musb->lock), flags); @@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req) rxstate(musb, req); } +static int musb_ep_restart_resume_work(struct musb *musb, void *data) +{ + struct musb_request *req = data; + + musb_ep_restart(musb, req); + + return 0; +} + static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { struct musb_ep *musb_ep; struct musb_request *request; struct musb *musb; - int status = 0; + int status; unsigned long lockflags; if (!ep || !req) @@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, if (request->ep != musb_ep) return -EINVAL; + status = pm_runtime_get(musb->controller); + if ((status != -EINPROGRESS) && status < 0) { + dev_err(musb->controller, + "pm runtime get failed in %s\n", + __func__); + pm_runtime_put_noidle(musb->controller); + + return status; + } + status = 0; + trace_musb_req_enq(request); /* request is mine now... */ @@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, map_dma_buffer(request, musb, musb_ep); - pm_runtime_get_sync(musb->controller); spin_lock_irqsave(&musb->lock, lockflags); /* don't queue if the ep is down */ @@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, list_add_tail(&request->list, &musb_ep->req_list); /* it this is the head of the queue, start i/o ... 
*/ - if (!musb_ep->busy && &request->list == musb_ep->req_list.next) - musb_ep_restart(musb, request); + if (!musb_ep->busy && &request->list == musb_ep->req_list.next) { + status = musb_queue_resume_work(musb, + musb_ep_restart_resume_work, + request); + if (status < 0) + dev_err(musb->controller, "%s resume work: %i\n", + __func__, status); + } unlock: spin_unlock_irqrestore(&musb->lock, lockflags); @@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g) */ /* Force check of devctl register for PM runtime */ - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); pm_runtime_mark_last_busy(musb->controller); pm_runtime_put_autosuspend(musb->controller); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index cc1225485509..e8be8e39ab8f 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev) } pm_runtime_enable(glue->dev); - pm_runtime_use_autosuspend(glue->dev); - pm_runtime_set_autosuspend_delay(glue->dev, 100); ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); - goto err2; + goto err3; } return 0; +err3: + pm_runtime_disable(glue->dev); + err2: platform_device_put(musb); @@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev) { struct omap2430_glue *glue = platform_get_drvdata(pdev); - pm_runtime_get_sync(glue->dev); platform_device_unregister(glue->musb); - pm_runtime_put_sync(glue->dev); - pm_runtime_dont_use_autosuspend(glue->dev); pm_runtime_disable(glue->dev); return 0; diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index df7c9f46be54..e85cc8e4e7a9 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", usb_otg_state_string(musb->xceiv->otg->state), otg_stat); idle_timeout = jiffies + (1 * HZ); - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); } else /* A-dev state machine */ { dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", @@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) break; } } - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); return idle_timeout; } @@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci) musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); if (reg & ~TUSB_PRCM_WNORCS) { musb->is_active = 1; - schedule_work(&musb->irq_work); + schedule_delayed_work(&musb->irq_work, 0); } dev_dbg(musb->controller, "wake %sactive %02x\n", musb->is_active ? 
"" : "in", reg); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f61477bed3a8..243ac5ebe46a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0ff7f38d7800..6e9fc8bcc285 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, + { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 21011c0a4c64..48ee04c94a75 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -595,6 +595,12 @@ #define ATMEL_VID 0x03eb /* Vendor ID */ #define STK541_PID 0x2109 /* Zigbee Controller */ +/* + * Texas Instruments + */ +#define TI_VID 0x0451 +#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ + /* * Blackfin gnICE JTAG * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index ffd086733421..1a59f335b063 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) /* COMMAND STAGE */ /* let's send the command via the control pipe */ + /* + * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack. + * Stack may be vmallocated. So no DMA for us. Make a copy. 
+ */ + memcpy(us->iobuf, srb->cmnd, srb->cmd_len); result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, US_CBI_ADSC, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, - us->ifnum, srb->cmnd, srb->cmd_len); + us->ifnum, us->iobuf, srb->cmd_len); /* check the return code for the command */ usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index 19ad8645d93c..e5d9bfc1703a 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c @@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb, np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, &clcd_id); if (!np) { - dev_err(dev, "no Versatile syscon node\n"); - return -ENODEV; + /* Vexpress does not have this */ + return 0; } versatile_clcd_type = (enum versatile_clcd)clcd_id->data; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index fdd3228e0678..3eb58cb51e56 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -155,6 +155,7 @@ config TANGOX_WATCHDOG config WDAT_WDT tristate "ACPI Watchdog Action Table (WDAT)" depends on ACPI + select WATCHDOG_CORE select ACPI_WATCHDOG help This driver adds support for systems with ACPI Watchdog Action diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 8347c90cf483..5eb04129f938 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -808,7 +808,11 @@ calc_seckey(struct cifs_ses *ses) struct crypto_skcipher *tfm_arc4; struct scatterlist sgin, sgout; struct skcipher_request *req; - unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ + unsigned char *sec_key; + + sec_key = kmalloc(CIFS_SESS_KEY_SIZE, GFP_KERNEL); + if (sec_key == NULL) + return -ENOMEM; get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); @@ -816,7 +820,7 @@ calc_seckey(struct cifs_ses *ses) if (IS_ERR(tfm_arc4)) { rc = PTR_ERR(tfm_arc4); cifs_dbg(VFS, "could not allocate crypto API arc4\n"); - return rc; + goto out; } rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response, @@ -854,7 +858,8 @@ calc_seckey(struct cifs_ses *ses) out_free_cipher: crypto_free_skcipher(tfm_arc4); - +out: + kfree(sec_key); return rc; } diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3f3185febc58..e3fed9249a04 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -3427,6 +3427,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, __u16 rc = 0; struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data; struct posix_acl_xattr_header *local_acl = (void *)pACL; + struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1); int count; int i; @@ -3453,8 +3454,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, return 0; } for (i = 0; i < count; i++) { - rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], - (struct posix_acl_xattr_entry *)(local_acl + 1)); + rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[i], &ace[i]); if (rc != 0) { /* ACE not converted */ break; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index aab5227979e2..4547aeddd12b 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -412,6 +412,9 @@ cifs_reconnect(struct TCP_Server_Info *server) } } while (server->tcpStatus == CifsNeedReconnect); + if (server->tcpStatus == CifsNeedNegotiate) + mod_delayed_work(cifsiod_wq, &server->echo, 0); + return rc; } @@ -421,17 +424,25 @@ cifs_echo_request(struct work_struct *work) int rc; struct TCP_Server_Info *server = container_of(work, struct 
TCP_Server_Info, echo.work); - unsigned long echo_interval = server->echo_interval; + unsigned long echo_interval; /* - * We cannot send an echo if it is disabled or until the - * NEGOTIATE_PROTOCOL request is done, which is indicated by - * server->ops->need_neg() == true. Also, no need to ping if - * we got a response recently. + * If we need to renegotiate, set echo interval to zero to + * immediately call echo service where we can renegotiate. + */ + if (server->tcpStatus == CifsNeedNegotiate) + echo_interval = 0; + else + echo_interval = server->echo_interval; + + /* + * We cannot send an echo if it is disabled. + * Also, no need to ping if we got a response recently. */ if (server->tcpStatus == CifsNeedReconnect || - server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew || + server->tcpStatus == CifsExiting || + server->tcpStatus == CifsNew || (server->ops->can_echo && !server->ops->can_echo(server)) || time_before(jiffies, server->lstrp + echo_interval - HZ)) goto requeue_echo; @@ -442,7 +453,7 @@ cifs_echo_request(struct work_struct *work) server->hostname); requeue_echo: - queue_delayed_work(cifsiod_wq, &server->echo, echo_interval); + queue_delayed_work(cifsiod_wq, &server->echo, server->echo_interval); } static bool diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 9a28133ac3b8..9b774f4b50c8 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -39,65 +39,54 @@ static void fname_crypt_complete(struct crypto_async_request *req, int res) static int fname_encrypt(struct inode *inode, const struct qstr *iname, struct fscrypt_str *oname) { - u32 ciphertext_len; struct skcipher_request *req = NULL; DECLARE_FS_COMPLETION_RESULT(ecr); struct fscrypt_info *ci = inode->i_crypt_info; struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; char iv[FS_CRYPTO_BLOCK_SIZE]; - struct scatterlist src_sg, dst_sg; + struct scatterlist sg; int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); - char *workbuf, buf[32], *alloc_buf = NULL; - unsigned lim; + unsigned int lim; + unsigned int cryptlen; lim = inode->i_sb->s_cop->max_namelen(inode); if (iname->len <= 0 || iname->len > lim) return -EIO; - ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE); - ciphertext_len = round_up(ciphertext_len, padding); - ciphertext_len = min(ciphertext_len, lim); + /* + * Copy the filename to the output buffer for encrypting in-place and + * pad it with the needed number of NUL bytes. 
+ */ + cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE); + cryptlen = round_up(cryptlen, padding); + cryptlen = min(cryptlen, lim); + memcpy(oname->name, iname->name, iname->len); + memset(oname->name + iname->len, 0, cryptlen - iname->len); - if (ciphertext_len <= sizeof(buf)) { - workbuf = buf; - } else { - alloc_buf = kmalloc(ciphertext_len, GFP_NOFS); - if (!alloc_buf) - return -ENOMEM; - workbuf = alloc_buf; - } + /* Initialize the IV */ + memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); - /* Allocate request */ + /* Set up the encryption request */ req = skcipher_request_alloc(tfm, GFP_NOFS); if (!req) { printk_ratelimited(KERN_ERR - "%s: crypto_request_alloc() failed\n", __func__); - kfree(alloc_buf); + "%s: skcipher_request_alloc() failed\n", __func__); return -ENOMEM; } skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, fname_crypt_complete, &ecr); + sg_init_one(&sg, oname->name, cryptlen); + skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); - /* Copy the input */ - memcpy(workbuf, iname->name, iname->len); - if (iname->len < ciphertext_len) - memset(workbuf + iname->len, 0, ciphertext_len - iname->len); - - /* Initialize IV */ - memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); - - /* Create encryption request */ - sg_init_one(&src_sg, workbuf, ciphertext_len); - sg_init_one(&dst_sg, oname->name, ciphertext_len); - skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); + /* Do the encryption */ res = crypto_skcipher_encrypt(req); if (res == -EINPROGRESS || res == -EBUSY) { + /* Request is being completed asynchronously; wait for it */ wait_for_completion(&ecr.completion); res = ecr.res; } - kfree(alloc_buf); skcipher_request_free(req); if (res < 0) { printk_ratelimited(KERN_ERR @@ -105,7 +94,7 @@ static int fname_encrypt(struct inode *inode, return res; } - oname->len = ciphertext_len; + oname->len = cryptlen; return 0; } diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 82f0285f5d08..67fb6d8876d0 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -185,7 +185,7 @@ int get_crypt_info(struct inode *inode) struct crypto_skcipher *ctfm; const char *cipher_str; int keysize; - u8 raw_key[FS_MAX_KEY_SIZE]; + u8 *raw_key = NULL; int res; res = fscrypt_initialize(); @@ -238,6 +238,15 @@ retry: if (res) goto out; + /* + * This cannot be a stack buffer because it is passed to the scatterlist + * crypto API as part of key derivation. 
+ */ + res = -ENOMEM; + raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); + if (!raw_key) + goto out; + if (fscrypt_dummy_context_enabled(inode)) { memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); goto got_key; @@ -276,7 +285,8 @@ got_key: if (res) goto out; - memzero_explicit(raw_key, sizeof(raw_key)); + kzfree(raw_key); + raw_key = NULL; if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { put_crypt_info(crypt_info); goto retry; @@ -287,7 +297,7 @@ out: if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); - memzero_explicit(raw_key, sizeof(raw_key)); + kzfree(raw_key); return res; } diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 282a51b07c57..a8a750f59621 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -235,6 +235,7 @@ struct ext4_io_submit { #define EXT4_MAX_BLOCK_SIZE 65536 #define EXT4_MIN_BLOCK_LOG_SIZE 10 #define EXT4_MAX_BLOCK_LOG_SIZE 16 +#define EXT4_MAX_CLUSTER_LOG_SIZE 30 #ifdef __KERNEL__ # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) #else diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 20da99da0a34..52b0530c5d65 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3565,7 +3565,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (blocksize < EXT4_MIN_BLOCK_SIZE || blocksize > EXT4_MAX_BLOCK_SIZE) { ext4_msg(sb, KERN_ERR, - "Unsupported filesystem blocksize %d", blocksize); + "Unsupported filesystem blocksize %d (%d log_block_size)", + blocksize, le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } + if (le32_to_cpu(es->s_log_block_size) > + (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log block size: %u", + le32_to_cpu(es->s_log_block_size)); goto failed_mount; } @@ -3697,6 +3705,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "block size (%d)", clustersize, blocksize); goto failed_mount; } + if (le32_to_cpu(es->s_log_cluster_size) > + (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { + ext4_msg(sb, KERN_ERR, + "Invalid log cluster size: %u", + le32_to_cpu(es->s_log_cluster_size)); + goto failed_mount; + } sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - le32_to_cpu(es->s_log_block_size); sbi->s_clusters_per_group = diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 6a4d0e5418a1..b3ebe512d64c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -286,6 +286,11 @@ const struct dentry_operations fuse_dentry_operations = { .d_release = fuse_dentry_release, }; +const struct dentry_operations fuse_root_dentry_operations = { + .d_init = fuse_dentry_init, + .d_release = fuse_dentry_release, +}; + int fuse_valid_type(int m) { return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || diff --git a/fs/fuse/file.c b/fs/fuse/file.c index abc66a6237fd..2401c5dabb2a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = page->mapping->host; + /* Haven't copied anything? Skip zeroing, size extending, dirtying. 
*/ + if (!copied) + goto unlock; + if (!PageUptodate(page)) { /* Zero any unwritten bytes at the end of the page */ size_t endoff = (pos + copied) & ~PAGE_MASK; @@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, fuse_write_update_size(inode, pos + copied); set_page_dirty(page); + +unlock: unlock_page(page); put_page(page); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0dfbb136e59a..91307940c8ac 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -692,6 +692,7 @@ static inline u64 get_node_id(struct inode *inode) extern const struct file_operations fuse_dev_operations; extern const struct dentry_operations fuse_dentry_operations; +extern const struct dentry_operations fuse_root_dentry_operations; /** * Inode to nodeid comparison. diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 17141099f2e7..6fe6a88ecb4a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) err = -ENOMEM; root = fuse_get_root_inode(sb, d.rootmode); + sb->s_d_op = &fuse_root_dentry_operations; root_dentry = d_make_root(root); if (!root_dentry) goto err_dev_free; - /* only now - we want root dentry with NULL ->d_op */ + /* Root dentry doesn't have .d_revalidate */ sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(0); diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index 98b3eb7d8eaf..0ec137310320 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -377,9 +377,9 @@ repeat: { int p; for (p = 0; p < rr->u.ER.len_id; p++) - printk("%c", rr->u.ER.data[p]); + printk(KERN_CONT "%c", rr->u.ER.data[p]); } - printk("\n"); + printk(KERN_CONT "\n"); break; case SIG('P', 'X'): inode->i_mode = isonum_733(rr->u.PX.mode); diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 532d8e242d4d..484bebc20bca 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, } ret = -EPROTONOSUPPORT; - if (minorversion == 0) + if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) ret = nfs4_callback_up_net(serv, net); else if (xprt->ops->bc_up) ret = xprt->ops->bc_up(serv, net); diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 9b3a82abab07..1452177c822d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state) return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; } +static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state, + const nfs4_stateid *stateid) +{ + return test_bit(NFS_OPEN_STATE, &state->flags) && + nfs4_stateid_match_other(&state->open_stateid, stateid); +} + #else #define nfs4_close_state(a, b) do { } while (0) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7897826d7c51..241da19b7da4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) } static void nfs_clear_open_stateid_locked(struct nfs4_state *state, - nfs4_stateid *arg_stateid, nfs4_stateid *stateid, fmode_t fmode) { clear_bit(NFS_O_RDWR_STATE, &state->flags); @@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, } if (stateid == NULL) return; - /* Handle races with OPEN */ - if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || - (nfs4_stateid_match_other(stateid, &state->open_stateid) && - !nfs4_stateid_is_newer(stateid, 
&state->open_stateid))) { + /* Handle OPEN+OPEN_DOWNGRADE races */ + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { nfs_resync_open_stateid_locked(state); return; } @@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) { write_seqlock(&state->seqlock); - nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); + /* Ignore if the CLOSE argument doesn't match the current stateid */ + if (nfs4_state_match_open_stateid_other(state, arg_stateid)) + nfs_clear_open_stateid_locked(state, stateid, fmode); write_sequnlock(&state->seqlock); if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) nfs4_schedule_state_manager(state->owner->so_server->nfs_client); @@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) static int nfs41_check_expired_locks(struct nfs4_state *state) { int status, ret = NFS_OK; - struct nfs4_lock_state *lsp; + struct nfs4_lock_state *lsp, *prev = NULL; struct nfs_server *server = NFS_SERVER(state->inode); if (!test_bit(LK_STATE_IN_USE, &state->flags)) goto out; + + spin_lock(&state->state_lock); list_for_each_entry(lsp, &state->lock_states, ls_locks) { if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { struct rpc_cred *cred = lsp->ls_state->owner->so_cred; + atomic_inc(&lsp->ls_count); + spin_unlock(&state->state_lock); + + nfs4_put_lock_state(prev); + prev = lsp; + status = nfs41_test_and_free_expired_stateid(server, &lsp->ls_stateid, cred); @@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state) set_bit(NFS_LOCK_LOST, &lsp->ls_flags); } else if (status != NFS_OK) { ret = status; - break; + nfs4_put_lock_state(prev); + goto out; } + spin_lock(&state->state_lock); } - }; + } + spin_unlock(&state->state_lock); + nfs4_put_lock_state(prev); out: return ret; } @@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) } else if (is_rdwr) calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; - if (!nfs4_valid_open_stateid(state)) + if (!nfs4_valid_open_stateid(state) || + test_bit(NFS_OPEN_STATE, &state->flags) == 0) call_close = 0; spin_unlock(&state->owner->so_lock); @@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) switch (task->tk_status) { case 0: renew_lease(data->res.server, data->timestamp); + break; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_EXPIRED: @@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) case -NFS4ERR_OLD_STATEID: case -NFS4ERR_STALE_STATEID: task->tk_status = 0; - if (data->roc) - pnfs_roc_set_barrier(data->inode, data->roc_barrier); break; default: if (nfs4_async_handle_error(task, data->res.server, @@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) } } data->rpc_status = task->tk_status; + if (data->roc && data->rpc_status == 0) + pnfs_roc_set_barrier(data->inode, data->roc_barrier); } static void nfs4_delegreturn_release(void *calldata) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5f4281ec5f72..0959c9661662 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1547,6 +1547,7 @@ restart: ssleep(1); case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: + case -NFS4ERR_OLD_STATEID: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_RECLAIM_BAD: case -NFS4ERR_RECLAIM_CONFLICT: diff --git a/fs/orangefs/orangefs-debugfs.c 
b/fs/orangefs/orangefs-debugfs.c index d484068ca716..38887cc5577f 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = { }; const struct file_operations debug_help_fops = { + .owner = THIS_MODULE, .open = orangefs_debug_help_open, .read = seq_read, .release = seq_release, @@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = { }; static const struct file_operations kernel_debug_fops = { + .owner = THIS_MODULE, .open = orangefs_debug_open, .read = orangefs_debug_read, .write = orangefs_debug_write, diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index edd46a0e951d..0e100856c7b8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -328,11 +328,11 @@ static struct dentry *ovl_d_real(struct dentry *dentry, if (!real) goto bug; + /* Handle recursion */ + real = d_real(real, inode, open_flags); + if (!inode || inode == d_inode(real)) return real; - - /* Handle recursion */ - return d_real(real, inode, open_flags); bug: WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry, inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0); diff --git a/fs/splice.c b/fs/splice.c index dcaf185a5731..5a7750bd2eea 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -408,7 +408,8 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, if (res <= 0) return -ENOMEM; - nr_pages = res / PAGE_SIZE; + BUG_ON(dummy); + nr_pages = DIV_ROUND_UP(res, PAGE_SIZE); vec = __vec; if (nr_pages > PIPE_DEF_BUFFERS) { diff --git a/fs/xattr.c b/fs/xattr.c index 3368659c471e..2d13b4e62fae 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct inode *inode = dentry->d_inode; - int error = -EOPNOTSUPP; + int error = -EAGAIN; int issec = !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); @@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, security_inode_post_setxattr(dentry, name, value, size, flags); } - } else if (issec) { - const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; - + } else { if (unlikely(is_bad_inode(inode))) return -EIO; - error = security_inode_setsecurity(inode, suffix, value, - size, flags); - if (!error) - fsnotify_xattr(dentry); + } + if (error == -EAGAIN) { + error = -EOPNOTSUPP; + + if (issec) { + const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; + + error = security_inode_setsecurity(inode, suffix, value, + size, flags); + if (!error) + fsnotify_xattr(dentry); + } } return error; diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 1b949e08015c..c19700e2a2fe 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h @@ -230,72 +230,62 @@ struct acpi_table_facs { /* Fields common to all versions of the FADT */ struct acpi_table_fadt { - struct acpi_table_header header; /* [V1] Common ACPI table header */ - u32 facs; /* [V1] 32-bit physical address of FACS */ - u32 dsdt; /* [V1] 32-bit physical address of DSDT */ - u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ - u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. 
*/ - u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ - u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ - u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ - u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ - u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ - u8 pstate_control; /* [V1] Processor performance state control */ - u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ - u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ - u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ - u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ - u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ - u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ - u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ - u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ - u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ - u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ - u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ - u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ - u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */ - u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ - u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ - u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ - u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ - u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ - u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ - u16 flush_stride; /* [V1] Number of flush strides that need to be read */ - u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ - u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ - u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ - u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ - u8 century; /* [V1] Index to century in RTC CMOS RAM */ - u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ - u8 reserved; /* [V1] Reserved, must be zero */ - u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ - /* End of Version 1 FADT fields (ACPI 1.0) */ - - struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ - u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ - u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ - u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ - u64 Xfacs; /* [V3] 64-bit physical address of FACS */ - u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ - struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ - struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ - struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ - struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control 
Reg Blk address */ - struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ - struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ - struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ - struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ - /* End of Version 3 FADT fields (ACPI 2.0) */ - - struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ - /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ - - struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ - /* End of Version 5 FADT fields (ACPI 5.0) */ - - u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ - /* End of Version 6 FADT fields (ACPI 6.0) */ - + struct acpi_table_header header; /* Common ACPI table header */ + u32 facs; /* 32-bit physical address of FACS */ + u32 dsdt; /* 32-bit physical address of DSDT */ + u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ + u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */ + u16 sci_interrupt; /* System vector of SCI interrupt */ + u32 smi_command; /* 32-bit Port address of SMI command port */ + u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ + u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ + u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ + u8 pstate_control; /* Processor performance state control */ + u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ + u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ + u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ + u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ + u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ + u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ + u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ + u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ + u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ + u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ + u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ + u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ + u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ + u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ + u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ + u8 cst_control; /* Support for the _CST object and C-States change notification */ + u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ + u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ + u16 flush_size; /* Processor memory cache line width, in bytes */ + u16 flush_stride; /* Number of flush strides that need to be read */ + u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ + u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ + u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ + u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ + u8 century; /* Index to century in RTC CMOS RAM */ + u16 boot_flags; /* 
IA-PC Boot Architecture Flags (see below for individual flags) */ + u8 reserved; /* Reserved, must be zero */ + u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ + struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ + u8 reset_value; /* Value to write to the reset_register port to reset the system */ + u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ + u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ + u64 Xfacs; /* 64-bit physical address of FACS */ + u64 Xdsdt; /* 64-bit physical address of DSDT */ + struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ + struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ + struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ + struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ + struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ + struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ + struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ + struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ + struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ + struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ + u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ }; /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ @@ -311,8 +301,8 @@ struct acpi_table_fadt { /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ -#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ -#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ +#define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ +#define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ /* Masks for FADT flags */ @@ -409,34 +399,20 @@ struct acpi_table_desc { * match the expected length. In other words, the length of the * FADT is the bottom line as to what the version really is. * - * NOTE: There is no officialy released V2 of the FADT. This - * version was used only for prototyping and testing during the - * 32-bit to 64-bit transition. V3 was the first official 64-bit - * version of the FADT. - * - * Update this list of defines when a new version of the FADT is - * added to the ACPI specification. Note that the FADT version is - * only incremented when new fields are appended to the existing - * version. Therefore, the FADT version is competely independent - * from the version of the ACPI specification where it is - * defined. 
- * - * For reference, the various FADT lengths are as follows: - * FADT V1 size: 0x074 ACPI 1.0 - * FADT V3 size: 0x0F4 ACPI 2.0 - * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 - * FADT V5 size: 0x10C ACPI 5.0 - * FADT V6 size: 0x114 ACPI 6.0 + * For reference, the values below are as follows: + * FADT V1 size: 0x074 + * FADT V2 size: 0x084 + * FADT V3 size: 0x0F4 + * FADT V4 size: 0x0F4 + * FADT V5 size: 0x10C + * FADT V6 size: 0x114 */ -#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ -#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ -#define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ -#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ -#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ +#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) +#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) +#define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) +#define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) +#define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) -/* Update these when new FADT versions are added */ - -#define ACPI_FADT_MAX_VERSION 6 #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" #endif /* __ACTBL_H__ */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index a5d98d171866..e861a24f06f2 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -191,6 +191,9 @@ #ifndef __init #define __init #endif +#ifndef __iomem +#define __iomem +#endif /* Host-dependent types and defines for user-space ACPICA */ diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h index 63554e9f6e0c..59a3b2f58c22 100644 --- a/include/asm-generic/export.h +++ b/include/asm-generic/export.h @@ -54,6 +54,7 @@ KSYM(__kstrtab_\name): KSYM(__kcrctab_\name): __put KSYM(__crc_\name) .weak KSYM(__crc_\name) + .set KSYM(__crc_\name), 0 .previous #endif #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7035b997aaa5..6aaf425cebc3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -14,7 +14,7 @@ * are obviously wrong for any sort of memory access. */ #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) -#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) +#define BPF_REGISTER_MIN_RANGE -1 struct bpf_reg_state { enum bpf_reg_type type; @@ -22,7 +22,8 @@ struct bpf_reg_state { * Used to determine if any memory access using this register will * result in a bad access. 
*/ - u64 min_value, max_value; + s64 min_value; + u64 max_value; union { /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ s64 imm; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 432f5c97e18f..928e5ca0caee 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -263,7 +263,9 @@ #endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ -#if GCC_VERSION >= 50000 +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b9f65d99873..e35e6de633b9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd); + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2d9b650047a5..d49e26c6cdc7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -429,6 +429,7 @@ struct intel_iommu { struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ struct idr pasid_idr; + u32 pasid_max; #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ca1ad9ebbc92..a0649973ee5b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) { #if defined(CONFIG_NET_L3_MASTER_DEV) if (!net->ipv4.sysctl_tcp_l3mdev_accept && - ipv6_l3mdev_skb(IP6CB(skb)->flags)) + skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) return true; #endif return false; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 3be7abd6e722..c9f379689dd0 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -476,7 +476,6 @@ enum { enum { MLX4_INTERFACE_STATE_UP = 1 << 0, MLX4_INTERFACE_STATE_DELETION = 1 << 1, - MLX4_INTERFACE_STATE_SHUTDOWN = 1 << 2, }; #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 91ee3643ccc8..e16a2a980ea8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1619,7 +1619,7 @@ enum netdev_priv_flags { * @dcbnl_ops: Data Center Bridging netlink ops * @num_tc: Number of traffic classes in the net device * @tc_to_txq: XXX: need comments on this one - * @prio_tc_map XXX: need comments on this one + * @prio_tc_map: XXX: need comments on this one * * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * @@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb); +static __always_inline int ____dev_forward_skb(struct net_device *dev, + struct sk_buff *skb) +{ + if (skb_orphan_frags(skb, GFP_ATOMIC) || + unlikely(!is_skb_forwardable(dev, skb))) { + atomic_long_inc(&dev->rx_dropped); + kfree_skb(skb); + return NET_RX_DROP; + } + + skb_scrub_packet(skb, true); + skb->priority = 0; + 
return 0; +} + void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); extern int netdev_budget; diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 2ab233661ae5..a58cca8bcb29 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -29,6 +29,7 @@ struct phy_device *of_phy_attach(struct net_device *dev, extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_mdio_parse_addr(struct device *dev, const struct device_node *np); extern int of_phy_register_fixed_link(struct device_node *np); +extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct device_node *np); #else /* CONFIG_OF */ @@ -83,6 +84,9 @@ static inline int of_phy_register_fixed_link(struct device_node *np) { return -ENOSYS; } +static inline void of_phy_deregister_fixed_link(struct device_node *np) +{ +} static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index dd15d39e1985..7dbe9148b2f8 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -374,16 +374,13 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* - * Get the offset in PAGE_SIZE. - * (TODO: hugepage should have ->index in PAGE_SIZE) + * Get index of the page with in radix-tree + * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) */ -static inline pgoff_t page_to_pgoff(struct page *page) +static inline pgoff_t page_to_index(struct page *page) { pgoff_t pgoff; - if (unlikely(PageHeadHuge(page))) - return page->index << compound_order(page); - if (likely(!PageTransTail(page))) return page->index; @@ -396,6 +393,18 @@ static inline pgoff_t page_to_pgoff(struct page *page) return pgoff; } +/* + * Get the offset in PAGE_SIZE. + * (TODO: hugepage should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + + return page_to_index(page); +} + /* * Return byte-offset into filesystem object for page. 
*/ diff --git a/include/linux/pci.h b/include/linux/pci.h index 0e49f70dbd9b..a38772a85588 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1928,6 +1928,20 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) +{ + while (1) { + if (!pci_is_pcie(dev)) + break; + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return dev; + if (!dev->bus->self) + break; + dev = dev->bus->self; + } + return NULL; +} + void pci_request_acs(void); bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); bool pci_acs_path_enabled(struct pci_dev *start, diff --git a/include/linux/sched.h b/include/linux/sched.h index 348f51b0ec92..e9c009dc3a4a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p); extern void sched_autogroup_detach(struct task_struct *p); extern void sched_autogroup_fork(struct signal_struct *sig); extern void sched_autogroup_exit(struct signal_struct *sig); +extern void sched_autogroup_exit_task(struct task_struct *p); #ifdef CONFIG_PROC_FS extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); @@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { } static inline void sched_autogroup_detach(struct task_struct *p) { } static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } +static inline void sched_autogroup_exit_task(struct task_struct *p) { } #endif extern int yield_to(struct task_struct *p, bool preempt); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ab02a457da1f..e5d193440374 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -25,6 +25,7 @@ struct svc_xprt_ops { void (*xpo_detach)(struct svc_xprt *); void (*xpo_free)(struct svc_xprt *); int (*xpo_secure_port)(struct svc_rqst *); + void (*xpo_kill_temp_xprt)(struct svc_xprt *); }; struct svc_xprt_class { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index f00bf667ec33..554671c81f4a 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1018,7 +1018,7 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) } struct hci_dev *hci_dev_get(int index); -struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); +struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); struct hci_dev *hci_alloc_dev(void); void hci_free_dev(struct hci_dev *hdev); diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h index d15214d673b2..2a1abbf8da74 100644 --- a/include/net/gro_cells.h +++ b/include/net/gro_cells.h @@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); __skb_queue_head_init(&cell->napi_skbs); + + set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); + netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); napi_enable(&cell->napi); } diff --git a/include/net/ip.h b/include/net/ip.h index 5413883ac47f..d3a107850a41 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -47,8 +47,7 @@ struct inet_skb_parm { #define IPSKB_REROUTED BIT(4) #define IPSKB_DOREDIRECT BIT(5) #define IPSKB_FRAG_PMTU 
BIT(6) -#define IPSKB_FRAG_SEGS BIT(7) -#define IPSKB_L3SLAVE BIT(8) +#define IPSKB_L3SLAVE BIT(7) u16 frag_max_size; }; diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 20ed9699fcd4..1b1cf33cbfb0 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h @@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, { int pkt_len, err; + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); pkt_len = skb->len - skb_inner_network_offset(skb); err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); if (unlikely(net_xmit_eval(err))) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index b9314b48e39f..f390c3bb05c5 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb, struct netlink_callback *cb); int fib_table_flush(struct net *net, struct fib_table *table); struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); +void fib_table_flush_external(struct fib_table *table); void fib_free_table(struct fib_table *tb); #ifndef CONFIG_IP_MULTIPLE_TABLES diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 8fed1cd78658..f11ca837361b 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -970,6 +970,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); +int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, + int addr_len); int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, int addr_len); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index fc4f757107df..0940598c002f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags, extern struct list_head net_namespace_list; struct net *get_net_ns_by_pid(pid_t pid); -struct net *get_net_ns_by_fd(int pid); +struct net *get_net_ns_by_fd(int fd); #ifdef CONFIG_SYSCTL void ipx_register_sysctl(void); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 50418052a520..d9d52c020a70 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -100,6 +100,9 @@ struct nf_conn { possible_net_t ct_net; +#if IS_ENABLED(CONFIG_NF_NAT) + struct rhlist_head nat_bysource; +#endif /* all members below initialized via memset */ u8 __nfct_init_offset[0]; @@ -117,9 +120,6 @@ struct nf_conn { /* Extensions */ struct nf_ct_ext *ext; -#if IS_ENABLED(CONFIG_NF_NAT) - struct rhash_head nat_bysource; -#endif /* Storage reserved for other modules, must be the last member */ union nf_conntrack_proto proto; }; diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 498814626e28..1723a67c0b0a 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) if (net->ct.labels_used == 0) return NULL; - return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, - sizeof(struct nf_conn_labels), GFP_ATOMIC); + return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); #else return NULL; #endif diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5031e072567b..b02af0bf5777 100644 
--- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; } -unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); +int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); unsigned int nft_parse_register(const struct nlattr *attr); int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); @@ -313,7 +313,7 @@ void nft_unregister_set(struct nft_set_ops *ops); * @size: maximum set size * @nelems: number of elements * @ndeact: number of deactivated elements queued for removal - * @timeout: default timeout value in msecs + * @timeout: default timeout value in jiffies * @gc_int: garbage collection interval in msecs * @policy: set parameterization (see enum nft_set_policies) * @udlen: user data length @@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set, const struct nft_set_ext_tmpl *tmpl, const u32 *key, const u32 *data, u64 timeout, gfp_t gfp); -void nft_set_elem_destroy(const struct nft_set *set, void *elem); +void nft_set_elem_destroy(const struct nft_set *set, void *elem, + bool destroy_expr); /** * struct nft_set_gc_batch_head - nf_tables set garbage collection batch @@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) { int err; - __module_get(src->ops->type->owner); if (src->ops->clone) { dst->ops = src->ops; err = src->ops->clone(dst, src); @@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) } else { memcpy(dst, src, src->ops->size); } + + __module_get(src->ops->type->owner); return 0; } diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 87a7f42e7639..31acc3f4f132 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *); struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, struct sctphdr *, struct sctp_association **, struct sctp_transport **); -void sctp_err_finish(struct sock *, struct sctp_association *); +void sctp_err_finish(struct sock *, struct sctp_transport *); void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, struct sctp_transport *t, __u32 pmtu); void sctp_icmp_redirect(struct sock *, struct sctp_transport *, diff --git a/include/net/sock.h b/include/net/sock.h index 73c6b008f1b7..92b269709b9a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk) void sock_gen_put(struct sock *sk); int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, - unsigned int trim_cap); + unsigned int trim_cap, bool refcounted); static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) { - return __sk_receive_skb(sk, skb, nested, 1); + return __sk_receive_skb(sk, skb, nested, 1, true); } static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b82d4d94834..123979fe12bf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) if (!net->ipv4.sysctl_tcp_l3mdev_accept && - ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) + skb && 
ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) return true; #endif return false; @@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); +int tcp_filter(struct sock *sk, struct sk_buff *skb); #undef STATE_TRACE diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 5cd4d4d2dd1d..9c9c6ad55f14 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h @@ -14,7 +14,6 @@ #include #include -#include #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) /* get pool statistics */ diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h index a6c35e1a89ad..05865edaefda 100644 --- a/include/uapi/linux/bpqether.h +++ b/include/uapi/linux/bpqether.h @@ -5,9 +5,7 @@ * Defines for the BPQETHER pseudo device driver */ -#ifndef __LINUX_IF_ETHER_H #include -#endif #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index d6d071fc3c56..3af60ee69053 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -640,7 +640,7 @@ * Control a data application associated with the currently viewed channel, * e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.) */ -#define KEY_DATA 0x275 +#define KEY_DATA 0x277 #define BTN_TRIGGER_HAPPY 0x2c0 #define BTN_TRIGGER_HAPPY1 0x2c0 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 300ef255d1e0..4ee67cb99143 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -972,12 +972,19 @@ struct kvm_irqfd { __u8 pad[16]; }; +/* For KVM_CAP_ADJUST_CLOCK */ + +/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. 
*/ +#define KVM_CLOCK_TSC_STABLE 2 + struct kvm_clock_data { __u64 clock; __u32 flags; __u32 pad[9]; }; +/* For KVM_CAP_SW_TLB */ + #define KVM_MMU_FSL_BOOKE_NOHV 0 #define KVM_MMU_FSL_BOOKE_HV 1 diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index e3969bd939e4..9611c7b6c18f 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -11,3 +11,4 @@ header-y += tc_vlan.h header-y += tc_bpf.h header-y += tc_connmark.h header-y += tc_ife.h +header-y += tc_tunnel_key.h diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 8a09b32e07d6..dd4104c9aa12 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -272,7 +272,7 @@ int __init rd_load_image(char *from) sys_write(out_fd, buf, BLOCK_SIZE); #if !defined(CONFIG_S390) if (!(i % 16)) { - printk("%c\b", rotator[rotate & 0x3]); + pr_cont("%c\b", rotator[rotate & 0x3]); rotate++; } #endif diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 570eeca7bdfa..ad1bc67aff1b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab) hlist_for_each_entry_safe(l, n, head, hash_node) { hlist_del_rcu(&l->hash_node); - htab_elem_free(htab, l); + if (l->state != HTAB_EXTRA_ELEM_USED) + htab_elem_free(htab, l); } } } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 228f962447a5..237f3d6a7ddc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr) err = bpf_map_charge_memlock(map); if (err) - goto free_map; + goto free_map_nouncharge; err = bpf_map_new_fd(map); if (err < 0) @@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr) return err; free_map: + bpf_map_uncharge_memlock(map); +free_map_nouncharge: map->ops->map_free(map); return err; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 99a7e5b388f2..8199821f54cf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state) reg->map_ptr->key_size, reg->map_ptr->value_size); if (reg->min_value != BPF_REGISTER_MIN_RANGE) - verbose(",min_value=%llu", - (unsigned long long)reg->min_value); + verbose(",min_value=%lld", + (long long)reg->min_value); if (reg->max_value != BPF_REGISTER_MAX_RANGE) verbose(",max_value=%llu", (unsigned long long)reg->max_value); @@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, * index'es we need to make sure that whatever we use * will have a set floor within our range. 
*/ - if ((s64)reg->min_value < 0) { + if (reg->min_value < 0) { verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; @@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg) { if (reg->max_value > BPF_REGISTER_MAX_RANGE) reg->max_value = BPF_REGISTER_MAX_RANGE; - if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) + if (reg->min_value < BPF_REGISTER_MIN_RANGE || + reg->min_value > BPF_REGISTER_MAX_RANGE) reg->min_value = BPF_REGISTER_MIN_RANGE; } @@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; - u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; + s64 min_val = BPF_REGISTER_MIN_RANGE; + u64 max_val = BPF_REGISTER_MAX_RANGE; bool min_set = false, max_set = false; u8 opcode = BPF_OP(insn->code); @@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, return; } + /* If one of our values was at the end of our ranges then we can't just + * do our normal operations to the register, we need to set the values + * to the min/max since they are undefined. + */ + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (max_val == BPF_REGISTER_MAX_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + switch (opcode) { case BPF_ADD: - dst_reg->min_value += min_val; - dst_reg->max_value += max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value += min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value += max_val; break; case BPF_SUB: - dst_reg->min_value -= min_val; - dst_reg->max_value -= max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value -= min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value -= max_val; break; case BPF_MUL: - dst_reg->min_value *= min_val; - dst_reg->max_value *= max_val; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value *= min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value *= max_val; break; case BPF_AND: - /* & is special since it could end up with 0 bits set. */ - dst_reg->min_value &= min_val; + /* Disallow AND'ing of negative numbers, ain't nobody got time + * for that. Otherwise the minimum is 0 and the max is the max + * value we could AND against. + */ + if (min_val < 0) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + else + dst_reg->min_value = 0; dst_reg->max_value = max_val; break; case BPF_LSH: @@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, */ if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - else + else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) dst_reg->min_value <<= min_val; if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) dst_reg->max_value = BPF_REGISTER_MAX_RANGE; - else + else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) dst_reg->max_value <<= max_val; break; case BPF_RSH: - dst_reg->min_value >>= min_val; - dst_reg->max_value >>= max_val; - break; - case BPF_MOD: - /* % is special since it is an unsigned modulus, so the floor - * will always be 0. + /* RSH by a negative number is undefined, and the BPF_RSH is an + * unsigned shift, so make the appropriate casts. 
*/ - dst_reg->min_value = 0; - dst_reg->max_value = max_val - 1; + if (min_val < 0 || dst_reg->min_value < 0) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + else + dst_reg->min_value = + (u64)(dst_reg->min_value) >> min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value >>= max_val; break; default: reset_reg_range_values(regs, insn->dst_reg); @@ -2430,6 +2454,7 @@ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { + bool varlen_map_access = env->varlen_map_value_access; struct bpf_reg_state *rold, *rcur; int i; @@ -2443,12 +2468,17 @@ static bool states_equal(struct bpf_verifier_env *env, /* If the ranges were not the same, but everything else was and * we didn't do a variable access into a map then we are a-ok. */ - if (!env->varlen_map_value_access && + if (!varlen_map_access && rold->type == rcur->type && rold->imm == rcur->imm) continue; + /* If we didn't map access then again we don't care about the + * mismatched range values and it's ok if our old type was + * UNKNOWN and we didn't go to a NOT_INIT'ed reg. + */ if (rold->type == NOT_INIT || - (rold->type == UNKNOWN_VALUE && rcur->type != NOT_INIT)) + (!varlen_map_access && rold->type == UNKNOWN_VALUE && + rcur->type != NOT_INIT)) continue; if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e292132efac..6ee1febdf6ff 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event, * this will always be called from the right CPU. */ cpuctx = __get_cpu_context(ctx); + + /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */ + if (perf_cgroup_from_task(current, ctx) != event->cgrp) { + /* + * We are removing the last cpu event in this context. + * If that event is not active in this cpu, cpuctx->cgrp + * should've been cleared by perf_cgroup_switch. + */ + WARN_ON_ONCE(!add && cpuctx->cgrp); + return; + } cpuctx->cgrp = add ? event->cgrp : NULL; } @@ -8018,6 +8029,7 @@ restart: * if is not specified, the range is treated as a single address. 
*/ enum { + IF_ACT_NONE = -1, IF_ACT_FILTER, IF_ACT_START, IF_ACT_STOP, @@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = { { IF_SRC_KERNEL, "%u/%u" }, { IF_SRC_FILEADDR, "%u@%s" }, { IF_SRC_KERNELADDR, "%u" }, + { IF_ACT_NONE, NULL }, }; /* diff --git a/kernel/exit.c b/kernel/exit.c index 9d68c45ebbe3..3076f3089919 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -836,6 +836,7 @@ void __noreturn do_exit(long code) */ perf_event_exit_task(tsk); + sched_autogroup_exit_task(tsk); cgroup_exit(tsk); /* diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 9c4d30483264..6b669593e7eb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) } else if (new->flags & IRQF_TRIGGER_MASK) { unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; - unsigned int omsk = irq_settings_get_trigger_mask(desc); + unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); if (nmsk != omsk) /* hope the handler works with current trigger mode */ pr_warn("irq %d uses trigger mode %u; requested %u\n", - irq, nmsk, omsk); + irq, omsk, nmsk); } *old_ptr = new; diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 51c4b24b6328..c2b88490d857 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -45,6 +45,14 @@ enum { #define LOCKF_USED_IN_IRQ_READ \ (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) +/* + * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, + * .data and .bss to fit in required 32MB limit for the kernel. With + * PROVE_LOCKING we could go over this limit and cause system boot-up problems. + * So, reduce the static allocations for lockdeps related structures so that + * everything fits in current required size limit. + */ +#ifdef CONFIG_PROVE_LOCKING_SMALL /* * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies * we track. @@ -54,18 +62,24 @@ enum { * table (if it's not there yet), and we check it for lock order * conflicts and deadlocks. */ +#define MAX_LOCKDEP_ENTRIES 16384UL +#define MAX_LOCKDEP_CHAINS_BITS 15 +#define MAX_STACK_TRACE_ENTRIES 262144UL +#else #define MAX_LOCKDEP_ENTRIES 32768UL #define MAX_LOCKDEP_CHAINS_BITS 16 -#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) - -#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) /* * Stack-trace: tightly packed array of stack backtrace * addresses. Protected by the hash_lock. */ #define MAX_STACK_TRACE_ENTRIES 524288UL +#endif + +#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) + +#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) extern struct list_head all_lock_classes; extern struct lock_chain lock_chains[]; diff --git a/kernel/module.c b/kernel/module.c index f57dd63186e6..0e54d5bf0097 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -1301,8 +1301,9 @@ static int check_version(Elf_Shdr *sechdrs, goto bad_version; } - pr_warn("%s: no symbol version for %s\n", mod->name, symname); - return 0; + /* Broken toolchain. Warn once, then let it go.. 
*/ + pr_warn_once("%s: no symbol version for %s\n", mod->name, symname); + return 1; bad_version: pr_warn("%s: disagrees about version of symbol %s\n", diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 5028f4fd504a..f7a55e9ff2f7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -783,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) return ret; } -static void cont_flush(void); - static ssize_t devkmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -800,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, if (ret) return ret; raw_spin_lock_irq(&logbuf_lock); - cont_flush(); while (user->seq == log_next_seq) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; @@ -863,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) return -ESPIPE; raw_spin_lock_irq(&logbuf_lock); - cont_flush(); switch (whence) { case SEEK_SET: /* the first record */ @@ -902,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait) poll_wait(file, &log_wait, wait); raw_spin_lock_irq(&logbuf_lock); - cont_flush(); if (user->seq < log_next_seq) { /* return error when data has vanished underneath us */ if (user->seq < log_first_seq) @@ -1289,7 +1284,6 @@ static int syslog_print(char __user *buf, int size) size_t skip; raw_spin_lock_irq(&logbuf_lock); - cont_flush(); if (syslog_seq < log_first_seq) { /* messages are gone, move to first one */ syslog_seq = log_first_seq; @@ -1349,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) return -ENOMEM; raw_spin_lock_irq(&logbuf_lock); - cont_flush(); if (buf) { u64 next_seq; u64 seq; @@ -1511,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source) /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: raw_spin_lock_irq(&logbuf_lock); - cont_flush(); if (syslog_seq < log_first_seq) { /* messages are gone, move to first one */ syslog_seq = log_first_seq; @@ -3028,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason) dumper->active = true; raw_spin_lock_irqsave(&logbuf_lock, flags); - cont_flush(); dumper->cur_seq = clear_seq; dumper->cur_idx = clear_idx; dumper->next_seq = log_next_seq; @@ -3119,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, bool ret; raw_spin_lock_irqsave(&logbuf_lock, flags); - cont_flush(); ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); raw_spin_unlock_irqrestore(&logbuf_lock, flags); @@ -3162,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, goto out; raw_spin_lock_irqsave(&logbuf_lock, flags); - cont_flush(); if (dumper->cur_seq < log_first_seq) { /* messages are gone, move to first available one */ dumper->cur_seq = log_first_seq; diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index a5d966cb8891..f1c8fd566246 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) { if (tg != &root_task_group) return false; - /* - * We can only assume the task group can't go away on us if - * autogroup_move_group() can see us on ->thread_group list. + * If we race with autogroup_move_group() the caller can use the old + * value of signal->autogroup but in this case sched_move_task() will + * be called again before autogroup_kref_put(). 
+ * + * However, there is no way sched_autogroup_exit_task() could tell us + * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. */ if (p->flags & PF_EXITING) return false; @@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) return true; } +void sched_autogroup_exit_task(struct task_struct *p) +{ + /* + * We are going to call exit_notify() and autogroup_move_group() can't + * see this thread after that: we can no longer use signal->autogroup. + * See the PF_EXITING check in task_wants_autogroup(). + */ + sched_move_task(p); +} + static void autogroup_move_group(struct task_struct *p, struct autogroup *ag) { @@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) } p->signal->autogroup = autogroup_kref_get(ag); - - if (!READ_ONCE(sysctl_sched_autogroup_enabled)) - goto out; - + /* + * We can't avoid sched_move_task() after we changed signal->autogroup, + * this process can already run with task_group() == prev->tg or we can + * race with cgroup code which can read autogroup = prev under rq->lock. + * In the latter case for_each_thread() can not miss a migrating thread, + * cpu_cgroup_attach() must not be possible after cgroup_exit() and it + * can't be removed from thread list, we hold ->siglock. + * + * If an exiting thread was already removed from thread list we rely on + * sched_autogroup_exit_task(). + */ for_each_thread(p, t) sched_move_task(t); -out: + unlock_task_sighand(p, &flags); autogroup_kref_put(prev); } diff --git a/kernel/taskstats.c b/kernel/taskstats.c index b3f05ee20d18..cbb387a265db 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1 [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; -static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { +/* + * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family. + * Make sure they are always aligned. 
+ */ +static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = { [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, }; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2050a7652a86..da87b3cba5b3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, /* Update rec->flags */ do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_DISABLED) + continue; + /* We need to update only differences of filter_hash */ in_old = !!ftrace_lookup_ip(old_hash, rec->ip); in_new = !!ftrace_lookup_ip(new_hash, rec->ip); @@ -1884,6 +1888,10 @@ rollback: /* Roll back what we did above */ do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_DISABLED) + continue; + if (rec == end) goto err_out; @@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable) return; do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_DISABLED) + continue; + failed = __ftrace_replace_code(rec, enable); if (failed) { ftrace_bug(failed, rec); @@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) struct dyn_ftrace *rec; do_for_each_ftrace_rec(pg, rec) { - if (FTRACE_WARN_ON_ONCE(rec->flags)) + if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) pr_warn(" %pS flags:%lx\n", (void *)rec->ip, rec->flags); } while_for_each_ftrace_rec(); @@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) goto out_unlock; do_for_each_ftrace_rec(pg, rec) { + + if (rec->flags & FTRACE_FL_DISABLED) + continue; + if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { ret = enter_record(hash, rec, clear_filter); if (ret < 0) { @@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, do_for_each_ftrace_rec(pg, rec) { + if (rec->flags & FTRACE_FL_DISABLED) + continue; + if (!ftrace_match_record(rec, &func_g, NULL, 0)) continue; @@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) do_for_each_ftrace_rec(pg, rec) { + if (rec->flags & FTRACE_FL_DISABLED) + continue; + if (ftrace_match_record(rec, &func_g, NULL, 0)) { /* if it is in the array */ exists = false; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b01e547d4d04..a6c8db1d62f6 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1085,6 +1085,9 @@ config PROVE_LOCKING For more details, see Documentation/locking/lockdep-design.txt. 
+config PROVE_LOCKING_SMALL + bool + config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT diff --git a/lib/debugobjects.c b/lib/debugobjects.c index a8e12601eb37..056052dc8e91 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -362,6 +362,7 @@ void debug_object_init(void *addr, struct debug_obj_descr *descr) __debug_object_init(addr, descr, 0); } +EXPORT_SYMBOL_GPL(debug_object_init); /** * debug_object_init_on_stack - debug checks when an object on stack is @@ -376,6 +377,7 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) __debug_object_init(addr, descr, 1); } +EXPORT_SYMBOL_GPL(debug_object_init_on_stack); /** * debug_object_activate - debug checks when an object is activated @@ -449,6 +451,7 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr) } return 0; } +EXPORT_SYMBOL_GPL(debug_object_activate); /** * debug_object_deactivate - debug checks when an object is deactivated @@ -496,6 +499,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr) raw_spin_unlock_irqrestore(&db->lock, flags); } +EXPORT_SYMBOL_GPL(debug_object_deactivate); /** * debug_object_destroy - debug checks when an object is destroyed @@ -542,6 +546,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr) out_unlock: raw_spin_unlock_irqrestore(&db->lock, flags); } +EXPORT_SYMBOL_GPL(debug_object_destroy); /** * debug_object_free - debug checks when an object is freed @@ -582,6 +587,7 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr) out_unlock: raw_spin_unlock_irqrestore(&db->lock, flags); } +EXPORT_SYMBOL_GPL(debug_object_free); /** * debug_object_assert_init - debug checks when object should be init-ed @@ -626,6 +632,7 @@ void debug_object_assert_init(void *addr, struct debug_obj_descr *descr) raw_spin_unlock_irqrestore(&db->lock, flags); } +EXPORT_SYMBOL_GPL(debug_object_assert_init); /** * debug_object_active_state - debug checks object usage state machine @@ -673,6 +680,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr, raw_spin_unlock_irqrestore(&db->lock, flags); } +EXPORT_SYMBOL_GPL(debug_object_active_state); #ifdef CONFIG_DEBUG_OBJECTS_FREE static void __debug_check_no_obj_freed(const void *address, unsigned long size) diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f0c7f1481bae..f2bd21b93dfc 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size) struct pipe_inode_info *pipe = i->pipe; struct pipe_buffer *buf; int idx = i->idx; - size_t off = i->iov_offset; + size_t off = i->iov_offset, orig_sz; if (unlikely(i->count < size)) size = i->count; + orig_sz = size; if (size) { if (off) /* make it relative to the beginning of buffer */ @@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size) pipe->nrbufs--; } } + i->count -= orig_sz; } void iov_iter_advance(struct iov_iter *i, size_t size) diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index 5464c8744ea9..e24388a863a7 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c @@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) if (!esize) { /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 * depending on if MOD equals 1. */ - rp[0] = 1; res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 
0 : 1; + if (res->nlimbs) { + if (mpi_resize(res, 1) < 0) + goto enomem; + rp = res->d; + rp[0] = 1; + } res->sign = 0; goto leave; } diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 5e51872b3fc1..fbdf87920093 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -20,6 +20,11 @@ #include #include +/* + * Note: test functions are marked noinline so that their names appear in + * reports. + */ + static noinline void __init kmalloc_oob_right(void) { char *ptr; @@ -411,6 +416,29 @@ static noinline void __init copy_user_test(void) kfree(kmem); } +static noinline void __init use_after_scope_test(void) +{ + volatile char *volatile p; + + pr_info("use-after-scope on int\n"); + { + int local = 0; + + p = (char *)&local; + } + p[0] = 1; + p[3] = 1; + + pr_info("use-after-scope on array\n"); + { + char local[1024] = {0}; + + p = local; + } + p[0] = 1; + p[1023] = 1; +} + static int __init kmalloc_tests_init(void) { kmalloc_oob_right(); @@ -436,6 +464,7 @@ static int __init kmalloc_tests_init(void) kasan_global_oob(); ksize_unpoisons_memory(); copy_user_test(); + use_after_scope_test(); return -EAGAIN; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cdcd25cb30fe..d4a6e4001512 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, - pmd_t *old_pmd, pmd_t *new_pmd) + pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) { spinlock_t *old_ptl, *new_ptl; pmd_t pmd; struct mm_struct *mm = vma->vm_mm; + bool force_flush = false; if ((old_addr & ~HPAGE_PMD_MASK) || (new_addr & ~HPAGE_PMD_MASK) || @@ -1456,6 +1457,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); + if (pmd_present(pmd) && pmd_dirty(pmd)) + force_flush = true; VM_BUG_ON(!pmd_none(*new_pmd)); if (pmd_move_must_withdraw(new_ptl, old_ptl) && @@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); if (new_ptl != old_ptl) spin_unlock(new_ptl); + if (force_flush) + flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); + else + *need_flush = true; spin_unlock(old_ptl); return true; } diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 70c009741aab..0e9505f66ec1 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c @@ -764,6 +764,25 @@ EXPORT_SYMBOL(__asan_storeN_noabort); void __asan_handle_no_return(void) {} EXPORT_SYMBOL(__asan_handle_no_return); +/* Emitted by compiler to poison large objects when they go out of scope. */ +void __asan_poison_stack_memory(const void *addr, size_t size) +{ + /* + * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded + * by redzones, so we simply round up size to simplify logic. + */ + kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE), + KASAN_USE_AFTER_SCOPE); +} +EXPORT_SYMBOL(__asan_poison_stack_memory); + +/* Emitted by compiler to unpoison large objects when they go into scope. 
*/ +void __asan_unpoison_stack_memory(const void *addr, size_t size) +{ + kasan_unpoison_shadow(addr, size); +} +EXPORT_SYMBOL(__asan_unpoison_stack_memory); + #ifdef CONFIG_MEMORY_HOTPLUG static int kasan_mem_notifier(struct notifier_block *nb, unsigned long action, void *data) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index e5c2181fee6f..1c260e6b3b3c 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -21,6 +21,7 @@ #define KASAN_STACK_MID 0xF2 #define KASAN_STACK_RIGHT 0xF3 #define KASAN_STACK_PARTIAL 0xF4 +#define KASAN_USE_AFTER_SCOPE 0xF8 /* Don't break randconfig/all*config builds */ #ifndef KASAN_ABI_VERSION @@ -53,6 +54,9 @@ struct kasan_global { #if KASAN_ABI_VERSION >= 4 struct kasan_source_location *location; #endif +#if KASAN_ABI_VERSION >= 5 + char *odr_indicator; +#endif }; /** diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 24c1211fe9d5..073325aedc68 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -90,6 +90,9 @@ static void print_error_description(struct kasan_access_info *info) case KASAN_KMALLOC_FREE: bug_type = "use-after-free"; break; + case KASAN_USE_AFTER_SCOPE: + bug_type = "use-after-scope"; + break; } pr_err("BUG: KASAN: %s in %pS at addr %p\n", diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 728d7790dc2d..87e1a7ca3846 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -103,6 +103,7 @@ static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; +#ifdef CONFIG_SYSFS static ssize_t scan_sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -295,6 +296,7 @@ struct attribute_group khugepaged_attr_group = { .attrs = khugepaged_attr, .name = "khugepaged", }; +#endif /* CONFIG_SYSFS */ #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB) diff --git a/mm/mlock.c b/mm/mlock.c index 145a4258ddbc..cdbed8aaa426 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page) */ spin_lock_irq(zone_lru_lock(zone)); - nr_pages = hpage_nr_pages(page); - if (!TestClearPageMlocked(page)) + if (!TestClearPageMlocked(page)) { + /* Potentially, PTE-mapped THP: do not skip the rest PTEs */ + nr_pages = 1; goto unlock_out; + } + nr_pages = hpage_nr_pages(page); __mod_zone_page_state(zone, NR_MLOCK, -nr_pages); if (__munlock_isolate_lru_page(page, true)) { diff --git a/mm/mremap.c b/mm/mremap.c index da22ad2a5678..30d7d2482eea 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte) static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, - unsigned long new_addr, bool need_rmap_locks) + unsigned long new_addr, bool need_rmap_locks, bool *need_flush) { struct mm_struct *mm = vma->vm_mm; pte_t *old_pte, *new_pte, pte; spinlock_t *old_ptl, *new_ptl; + bool force_flush = false; + unsigned long len = old_end - old_addr; /* * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma @@ -146,7 +148,19 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, new_pte++, new_addr += PAGE_SIZE) { if (pte_none(*old_pte)) continue; + pte = ptep_get_and_clear(mm, old_addr, old_pte); + /* + * If we are remapping a dirty PTE, make sure + * to flush TLB before we drop the PTL for the + * old PTE or we may race with page_mkclean(). 
+ * + * This check has to be done after we removed the + * old PTE from page tables or another thread may + * dirty it after the check and before the removal. + */ + if (pte_present(pte) && pte_dirty(pte)) + force_flush = true; pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); pte = move_soft_dirty_pte(pte); set_pte_at(mm, new_addr, new_pte, pte); @@ -156,6 +170,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, if (new_ptl != old_ptl) spin_unlock(new_ptl); pte_unmap(new_pte - 1); + if (force_flush) + flush_tlb_range(vma, old_end - len, old_end); + else + *need_flush = true; pte_unmap_unlock(old_pte - 1, old_ptl); if (need_rmap_locks) drop_rmap_locks(vma); @@ -201,13 +219,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (need_rmap_locks) take_rmap_locks(vma); moved = move_huge_pmd(vma, old_addr, new_addr, - old_end, old_pmd, new_pmd); + old_end, old_pmd, new_pmd, + &need_flush); if (need_rmap_locks) drop_rmap_locks(vma); - if (moved) { - need_flush = true; + if (moved) continue; - } } split_huge_pmd(vma, old_pmd, old_addr); if (pmd_trans_unstable(old_pmd)) @@ -220,11 +237,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma, extent = next - new_addr; if (extent > LATENCY_LIMIT) extent = LATENCY_LIMIT; - move_ptes(vma, old_pmd, old_addr, old_addr + extent, - new_vma, new_pmd, new_addr, need_rmap_locks); - need_flush = true; + move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, + new_pmd, new_addr, need_rmap_locks, &need_flush); } - if (likely(need_flush)) + if (need_flush) flush_tlb_range(vma, old_end-len, old_addr); mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); diff --git a/mm/truncate.c b/mm/truncate.c index a01cce450a26..8d8c62d89e6d 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -283,7 +283,7 @@ void truncate_inode_pages_range(struct address_space *mapping, if (!trylock_page(page)) continue; - WARN_ON(page_to_pgoff(page) != index); + WARN_ON(page_to_index(page) != index); if (PageWriteback(page)) { unlock_page(page); continue; @@ -371,7 +371,7 @@ void truncate_inode_pages_range(struct address_space *mapping, } lock_page(page); - WARN_ON(page_to_pgoff(page) != index); + WARN_ON(page_to_index(page) != index); wait_on_page_writeback(page); truncate_inode_page(mapping, page); unlock_page(page); @@ -492,7 +492,7 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, if (!trylock_page(page)) continue; - WARN_ON(page_to_pgoff(page) != index); + WARN_ON(page_to_index(page) != index); /* Middle of THP: skip */ if (PageTransTail(page)) { @@ -612,7 +612,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, } lock_page(page); - WARN_ON(page_to_pgoff(page) != index); + WARN_ON(page_to_index(page) != index); if (page->mapping != mapping) { unlock_page(page); continue; diff --git a/mm/vmscan.c b/mm/vmscan.c index 76fda2268148..d75cdf360730 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2354,6 +2354,8 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc } } + cond_resched(); + if (nr_reclaimed < nr_to_reclaim || scan_adjusted) continue; diff --git a/mm/workingset.c b/mm/workingset.c index 617475f529f4..fb1f9183d89a 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -348,7 +348,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); local_irq_enable(); - if (memcg_kmem_enabled()) { + if (sc->memcg) { pages = mem_cgroup_node_nr_lru_pages(sc->memcg, 
sc->nid, LRU_ALL_FILE); } else { diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index e034afbd1bb0..08ce36147c4c 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, batadv_softif_destroy_sysfs(hard_iface->soft_iface); } + hard_iface->soft_iface = NULL; batadv_hardif_put(hard_iface); out: diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 2333777f919d..8af1611b8ab2 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg) primary_if = batadv_primary_if_get_selected(bat_priv); if (unlikely(!primary_if)) { err = BATADV_TP_REASON_DST_UNREACHABLE; + tp_vars->reason = err; goto out; } diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index d020299baba4..1904a93f47d5 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -1090,7 +1090,6 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, { struct hci_conn *hcon; struct hci_dev *hdev; - bdaddr_t *src = BDADDR_ANY; int n; n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", @@ -1101,7 +1100,8 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, if (n < 7) return -EINVAL; - hdev = hci_get_route(addr, src); + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ + hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); if (!hdev) return -ENOENT; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 3809617aa98d..dc59eae54717 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -613,7 +613,7 @@ int hci_conn_del(struct hci_conn *conn) return 0; } -struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) +struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) { int use_src = bacmp(src, BDADDR_ANY); struct hci_dev *hdev = NULL, *d; @@ -634,7 +634,29 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) */ if (use_src) { - if (!bacmp(&d->bdaddr, src)) { + bdaddr_t id_addr; + u8 id_addr_type; + + if (src_type == BDADDR_BREDR) { + if (!lmp_bredr_capable(d)) + continue; + bacpy(&id_addr, &d->bdaddr); + id_addr_type = BDADDR_BREDR; + } else { + if (!lmp_le_capable(d)) + continue; + + hci_copy_identity_address(d, &id_addr, + &id_addr_type); + + /* Convert from HCI to three-value type */ + if (id_addr_type == ADDR_LE_DEV_PUBLIC) + id_addr_type = BDADDR_LE_PUBLIC; + else + id_addr_type = BDADDR_LE_RANDOM; + } + + if (!bacmp(&id_addr, src) && id_addr_type == src_type) { hdev = d; break; } } else { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index d4cad29b033f..577f1c01454a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7060,7 +7060,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, dst_type, __le16_to_cpu(psm)); - hdev = hci_get_route(dst, &chan->src); + hdev = hci_get_route(dst, &chan->src, chan->src_type); if (!hdev) return -EHOSTUNREACH; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 8e385a0ae60e..2f2cb5e27cdd 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -178,7 +178,7 @@ static void rfcomm_reparent_device(struct rfcomm_dev *dev) struct hci_dev *hdev; struct hci_conn *conn; - hdev = hci_get_route(&dev->dst, &dev->src); + hdev = hci_get_route(&dev->dst, &dev->src, 
BDADDR_BREDR); if (!hdev) return; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index f52bcbf2e58c..3125ce670c2f 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -219,7 +219,7 @@ static int sco_connect(struct sock *sk) BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); - hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); if (!hdev) return -EHOSTUNREACH; diff --git a/net/can/bcm.c b/net/can/bcm.c index 8e999ffdf28b..436a7537e6a9 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -77,7 +77,7 @@ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) -#define CAN_BCM_VERSION "20160617" +#define CAN_BCM_VERSION "20161123" MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); @@ -109,8 +109,9 @@ struct bcm_op { u32 count; u32 nframes; u32 currframe; - struct canfd_frame *frames; - struct canfd_frame *last_frames; + /* void pointers to arrays of struct can[fd]_frame */ + void *frames; + void *last_frames; struct canfd_frame sframe; struct canfd_frame last_sframe; struct sock *sk; @@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) if (op->flags & RX_FILTER_ID) { /* the easiest case */ - bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); + bcm_rx_update_and_send(op, op->last_frames, rxframe); goto rx_starttimer; } @@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (msg_head->nframes) { /* update CAN frames content */ - err = memcpy_from_msg((u8 *)op->frames, msg, + err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) return err; @@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } if (msg_head->nframes) { - err = memcpy_from_msg((u8 *)op->frames, msg, + err = memcpy_from_msg(op->frames, msg, msg_head->nframes * op->cfsiz); if (err < 0) { if (op->frames != &op->sframe) @@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, /* check flags */ if (op->flags & RX_RTR_FRAME) { + struct canfd_frame *frame0 = op->frames; /* no timers in RTR-mode */ hrtimer_cancel(&op->thrtimer); @@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, * prevent a full-load-loopback-test ... 
;-] */ if ((op->flags & TX_CP_CAN_ID) || - (op->frames[0].can_id == op->can_id)) - op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; + (frame0->can_id == op->can_id)) + frame0->can_id = op->can_id & ~CAN_RTR_FLAG; } else { if (op->flags & SETTIMER) { @@ -1549,24 +1551,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); + int ret = 0; if (len < sizeof(*addr)) return -EINVAL; - if (bo->bound) - return -EISCONN; + lock_sock(sk); + + if (bo->bound) { + ret = -EISCONN; + goto fail; + } /* bind a device to this socket */ if (addr->can_ifindex) { struct net_device *dev; dev = dev_get_by_index(&init_net, addr->can_ifindex); - if (!dev) - return -ENODEV; - + if (!dev) { + ret = -ENODEV; + goto fail; + } if (dev->type != ARPHRD_CAN) { dev_put(dev); - return -ENODEV; + ret = -ENODEV; + goto fail; } bo->ifindex = dev->ifindex; @@ -1577,17 +1586,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, bo->ifindex = 0; } - bo->bound = 1; - if (proc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_data(bo->procname, 0644, proc_dir, &bcm_proc_fops, sk); + if (!bo->bcm_proc_read) { + ret = -ENOMEM; + goto fail; + } } - return 0; + bo->bound = 1; + +fail: + release_sock(sk); + + return ret; } static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, diff --git a/net/core/dev.c b/net/core/dev.c index 820bac239738..6666b28b6815 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) { - if (skb_orphan_frags(skb, GFP_ATOMIC) || - unlikely(!is_skb_forwardable(dev, skb))) { - atomic_long_inc(&dev->rx_dropped); - kfree_skb(skb); - return NET_RX_DROP; + int ret = ____dev_forward_skb(dev, skb); + + if (likely(!ret)) { + skb->protocol = eth_type_trans(skb, dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); } - skb_scrub_packet(skb, true); - skb->priority = 0; - skb->protocol = eth_type_trans(skb, dev); - skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); - - return 0; + return ret; } EXPORT_SYMBOL_GPL(__dev_forward_skb); @@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb) goto out; } - *(__sum16 *)(skb->data + offset) = csum_fold(csum); + *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 977489820eb9..047a1752ece1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -2479,6 +2479,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GET_TS_INFO: case ETHTOOL_GEEE: case ETHTOOL_GTUNABLE: + case ETHTOOL_GLINKSETTINGS: break; default: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) diff --git a/net/core/filter.c b/net/core/filter.c index 00351cdf7d0c..b391209838ef 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) return dev_forward_skb(dev, skb); } +static inline int __bpf_rx_skb_no_mac(struct net_device *dev, + struct sk_buff *skb) +{ + int ret = ____dev_forward_skb(dev, skb); + + if (likely(!ret)) { + skb->dev = dev; + ret = netif_rx(skb); + } + + return ret; +} + static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) { int ret; @@ -1647,6 
+1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) return ret; } +static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, + u32 flags) +{ + /* skb->mac_len is not set on normal egress */ + unsigned int mlen = skb->network_header - skb->mac_header; + + __skb_pull(skb, mlen); + + /* At ingress, the mac header has already been pulled once. + * At egress, skb_pospull_rcsum has to be done in case that + * the skb is originated from ingress (i.e. a forwarded skb) + * to ensure that rcsum starts at net header. + */ + if (!skb_at_tc_ingress(skb)) + skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); + skb_pop_mac_header(skb); + skb_reset_mac_len(skb); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); +} + +static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, + u32 flags) +{ + bpf_push_mac_rcsum(skb); + return flags & BPF_F_INGRESS ? + __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); +} + +static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, + u32 flags) +{ + switch (dev->type) { + case ARPHRD_TUNNEL: + case ARPHRD_TUNNEL6: + case ARPHRD_SIT: + case ARPHRD_IPGRE: + case ARPHRD_VOID: + case ARPHRD_NONE: + return __bpf_redirect_no_mac(skb, dev, flags); + default: + return __bpf_redirect_common(skb, dev, flags); + } +} + BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) { struct net_device *dev; @@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) return -ENOMEM; } - bpf_push_mac_rcsum(clone); - - return flags & BPF_F_INGRESS ? - __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone); + return __bpf_redirect(clone, dev, flags); } static const struct bpf_func_proto bpf_clone_redirect_proto = { @@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb) return -EINVAL; } - bpf_push_mac_rcsum(skb); - - return ri->flags & BPF_F_INGRESS ? 
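
/* Editor's sketch, not part of the patch: a minimal tc/BPF classifier that
 * exercises the __bpf_redirect() split introduced above.  With the no-mac
 * path, bpf_redirect() can target an L3 device (ipip/gre/sit tunnels,
 * ARPHRD_VOID/ARPHRD_NONE devices) and the MAC header is stripped before
 * delivery.  TARGET_IFINDEX and the include paths are assumptions, not part
 * of the commit.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define TARGET_IFINDEX 42        /* hypothetical tunnel ifindex */

SEC("classifier")
int redirect_to_l3_dev(struct __sk_buff *skb)
{
        /* flags = 0 redirects to the device's egress path;
         * BPF_F_INGRESS would inject into its receive path
         * (the __bpf_rx_skb_no_mac() helper above).
         */
        return bpf_redirect(TARGET_IFINDEX, 0);
}

char _license[] SEC("license") = "GPL";
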
- __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); + return __bpf_redirect(skb, dev, ri->flags); } static const struct bpf_func_proto bpf_redirect_proto = { diff --git a/net/core/flow.c b/net/core/flow.c index 3937b1b68d5b..18e8893d4be5 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -95,7 +95,6 @@ static void flow_cache_gc_task(struct work_struct *work) list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { flow_entry_kill(fce, xfrm); atomic_dec(&xfrm->flow_cache_gc_count); - WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0); } } @@ -236,9 +235,8 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, if (fcp->hash_count > fc->high_watermark) flow_cache_shrink(fc, fcp); - if (fcp->hash_count > 2 * fc->high_watermark || - atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) { - atomic_inc(&net->xfrm.flow_cache_genid); + if (atomic_read(&net->xfrm.flow_cache_gc_count) > + 2 * num_online_cpus() * fc->high_watermark) { flo = ERR_PTR(-ENOBUFS); goto ret_object; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ab193e5def07..c6d8207ffa7e 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_keyid *key_keyid; bool skip_vlan = false; u8 ip_proto = 0; - bool ret = false; + bool ret; if (!data) { data = skb->data; @@ -549,12 +549,17 @@ ip_proto_again: out_good: ret = true; -out_bad: + key_control->thoff = (u16)nhoff; +out: key_basic->n_proto = proto; key_basic->ip_proto = ip_proto; - key_control->thoff = (u16)nhoff; return ret; + +out_bad: + ret = false; + key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); + goto out; } EXPORT_SYMBOL(__skb_flow_dissect); @@ -1008,4 +1013,4 @@ static int __init init_default_flow_dissectors(void) return 0; } -late_initcall_sync(init_default_flow_dissectors); +core_initcall(init_default_flow_dissectors); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index f61c0e02a413..7001da910c6b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer) bool alloc; int id; + if (atomic_read(&net->count) == 0) + return NETNSA_NSID_NOT_ASSIGNED; spin_lock_irqsave(&net->nsid_lock, flags); alloc = atomic_read(&peer->count) == 0 ? 
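
/* Editor's note, illustrative arithmetic only (not part of the patch): the
 * rewritten flow cache check above stops admitting new entries once the
 * global garbage-collection backlog exceeds
 * 2 * num_online_cpus() * fc->high_watermark, instead of bumping the cache
 * genid, which invalidated every cached flow.  Assuming the default high
 * watermark of 4096 entries on an 8-CPU system:
 *
 *        2 * 8 * 4096 = 65536 entries pending GC before flow_cache_lookup()
 *        starts returning -ENOBUFS
 */
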
false : true; id = __peernet2id_alloc(net, peer, &alloc); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fb7348f13501..a6196cf844f6 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype) rtnl_msg_handlers[protocol][msgindex].doit = NULL; rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; + rtnl_msg_handlers[protocol][msgindex].calcit = NULL; return 0; } @@ -839,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, if (dev->dev.parent && dev_is_pci(dev->dev.parent) && (ext_filter_mask & RTEXT_FILTER_VF)) { int num_vfs = dev_num_vf(dev->dev.parent); - size_t size = nla_total_size(sizeof(struct nlattr)); - size += nla_total_size(num_vfs * sizeof(struct nlattr)); + size_t size = nla_total_size(0); size += num_vfs * - (nla_total_size(sizeof(struct ifla_vf_mac)) + - nla_total_size(MAX_VLAN_LIST_LEN * - sizeof(struct nlattr)) + + (nla_total_size(0) + + nla_total_size(sizeof(struct ifla_vf_mac)) + + nla_total_size(sizeof(struct ifla_vf_vlan)) + + nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ nla_total_size(MAX_VLAN_LIST_LEN * sizeof(struct ifla_vf_vlan_info)) + nla_total_size(sizeof(struct ifla_vf_spoofchk)) + + nla_total_size(sizeof(struct ifla_vf_tx_rate)) + nla_total_size(sizeof(struct ifla_vf_rate)) + nla_total_size(sizeof(struct ifla_vf_link_state)) + nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + + nla_total_size(0) + /* nest IFLA_VF_STATS */ /* IFLA_VF_STATS_RX_PACKETS */ nla_total_size_64bit(sizeof(__u64)) + /* IFLA_VF_STATS_TX_PACKETS */ @@ -898,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev, static size_t rtnl_xdp_size(const struct net_device *dev) { - size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ + size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ + nla_total_size(1); /* XDP_ATTACHED */ if (!dev->netdev_ops->ndo_xdp) return 0; @@ -927,8 +931,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_PROMISCUITY */ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */ - + nla_total_size(4) /* IFLA_MAX_GSO_SEGS */ - + nla_total_size(4) /* IFLA_MAX_GSO_SIZE */ + + nla_total_size(4) /* IFLA_GSO_MAX_SEGS */ + + nla_total_size(4) /* IFLA_GSO_MAX_SIZE */ + nla_total_size(1) /* IFLA_OPERSTATE */ + nla_total_size(1) /* IFLA_LINKMODE */ + nla_total_size(4) /* IFLA_CARRIER_CHANGES */ @@ -1605,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) head = &net->dev_index_head[h]; hlist_for_each_entry(dev, head, index_hlist) { if (link_dump_filtered(dev, master_idx, kind_ops)) - continue; + goto cont; if (idx < s_idx) goto cont; err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, @@ -2733,7 +2737,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) ext_filter_mask)); } - return min_ifinfo_dump_size; + return nlmsg_total_size(min_ifinfo_dump_size); } static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) @@ -2848,7 +2852,10 @@ nla_put_failure: static inline size_t rtnl_fdb_nlmsg_size(void) { - return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); + return NLMSG_ALIGN(sizeof(struct ndmsg)) + + nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ + nla_total_size(sizeof(u16)) + /* NDA_VLAN */ + 0; } static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, diff --git a/net/core/sock.c b/net/core/sock.c index c73e28fc9c2a..00a074dbfe9b 100644 --- a/net/core/sock.c +++ 
b/net/core/sock.c @@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) EXPORT_SYMBOL(sock_queue_rcv_skb); int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, - const int nested, unsigned int trim_cap) + const int nested, unsigned int trim_cap, bool refcounted) { int rc = NET_RX_SUCCESS; @@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, bh_unlock_sock(sk); out: - sock_put(sk); + if (refcounted) + sock_put(sk); return rc; discard_and_relse: kfree_skb(skb); @@ -714,7 +715,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, val = min_t(u32, val, sysctl_wmem_max); set_sndbuf: sk->sk_userlocks |= SOCK_SNDBUF_LOCK; - sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF); + sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); /* Wake up sending tasks if we upped the value. */ sk->sk_write_space(sk); break; @@ -750,7 +751,7 @@ set_rcvbuf: * returning the value we actually used in getsockopt * is the most desirable behavior. */ - sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF); + sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_RCVBUFFORCE: @@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); newsk->sk_err = 0; + newsk->sk_err_soft = 0; newsk->sk_priority = 0; newsk->sk_incoming_cpu = raw_smp_processor_id(); atomic64_set(&newsk->sk_cookie, 0); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 345a3aeb8c7e..edbe59d203ef 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) { const struct iphdr *iph = (struct iphdr *)skb->data; const u8 offset = iph->ihl << 2; - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); + const struct dccp_hdr *dh; struct dccp_sock *dp; struct inet_sock *inet; const int type = icmp_hdr(skb)->type; @@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) int err; struct net *net = dev_net(skb->dev); - if (skb->len < offset + sizeof(*dh) || - skb->len < offset + __dccp_basic_hdr_len(dh)) { - __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); - return; - } + /* Only need dccph_dport & dccph_sport which are the first + * 4 bytes in dccp header. + * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. 
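
/* Editor's sketch, not part of the patch: a user-space probe for the
 * SO_SNDBUFFORCE signed-overflow case addressed by the max_t(int, ...)
 * change in net/core/sock.c above.  SO_SNDBUFFORCE is not clamped to
 * sysctl_wmem_max, so doubling INT_MAX wraps negative; comparing as int
 * lets SOCK_MIN_SNDBUF win instead of storing a negative sk_sndbuf.
 * Requires CAP_NET_ADMIN for the forced setsockopt to take effect.
 */
#include <limits.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int val = INT_MAX, out = 0;
        socklen_t len = sizeof(out);

        if (fd < 0)
                return 1;
        setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &val, sizeof(val));
        getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
        printf("sk_sndbuf after forcing INT_MAX: %d\n", out);
        return 0;
}
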
+ */ + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); + dh = (struct dccp_hdr *)(skb->data + offset); sk = __inet_lookup_established(net, &dccp_hashinfo, iph->daddr, dh->dccph_dport, @@ -698,6 +700,7 @@ int dccp_invalid_packet(struct sk_buff *skb) { const struct dccp_hdr *dh; unsigned int cscov; + u8 dccph_doff; if (skb->pkt_type != PACKET_HOST) return 1; @@ -719,18 +722,19 @@ int dccp_invalid_packet(struct sk_buff *skb) /* * If P.Data Offset is too small for packet type, drop packet and return */ - if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { - DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff); + dccph_doff = dh->dccph_doff; + if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) { + DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff); return 1; } /* * If P.Data Offset is too too large for packet, drop packet and return */ - if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) { - DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff); + if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) { + DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff); return 1; } - + dh = dccp_hdr(skb); /* * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet * has short sequence numbers), drop packet and return @@ -868,7 +872,7 @@ lookup: goto discard_and_relse; nf_reset(skb); - return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted); no_dccp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3828f94b234c..715e5d1dc107 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; - const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); + const struct dccp_hdr *dh; struct dccp_sock *dp; struct ipv6_pinfo *np; struct sock *sk; @@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, __u64 seq; struct net *net = dev_net(skb->dev); - if (skb->len < offset + sizeof(*dh) || - skb->len < offset + __dccp_basic_hdr_len(dh)) { - __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), - ICMP6_MIB_INERRORS); - return; - } + /* Only need dccph_dport & dccph_sport which are the first + * 4 bytes in dccp header. + * Our caller (icmpv6_notify()) already pulled 8 bytes for us. + */ + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); + BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); + dh = (struct dccp_hdr *)(skb->data + offset); sk = __inet6_lookup_established(net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, @@ -738,7 +739,8 @@ lookup: if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; - return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; + return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, + refcounted) ? 
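
/* Editor's sketch, not part of the patch: the rule the dccp_invalid_packet()
 * hunk above follows.  pskb_may_pull() may reallocate skb->head, so header
 * pointers taken before the call must not be dereferenced afterwards; copy
 * the fields you need first and re-read the header pointer after the pull.
 * The helper below is illustrative only.
 */
#include <linux/dccp.h>
#include <linux/skbuff.h>

static bool example_doff_ok(struct sk_buff *skb)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);
        const u8 doff = dh->dccph_doff;        /* copy before the pull */

        if (!pskb_may_pull(skb, doff * sizeof(u32)))
                return false;

        dh = dccp_hdr(skb);        /* re-read: skb->head may have moved */
        return dh->dccph_type != DCCP_PKT_INVALID;
}
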
-1 : 0; no_dccp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) @@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), + .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 41e65804ddf5..9fe25bf63296 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout) __kfree_skb(skb); } + /* If socket has been already reset kill it. */ + if (sk->sk_state == DCCP_CLOSED) + goto adjudge_to_death; + if (data_was_unread) { /* Unread data was tossed, send an appropriate Reset Code */ DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index a6902c1e2f28..7899919cd9f0 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -233,6 +233,8 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, genphy_read_status(phydev); if (ds->ops->adjust_link) ds->ops->adjust_link(ds, port, phydev); + + put_device(&phydev->mdio.dev); } return 0; @@ -504,15 +506,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, void dsa_cpu_dsa_destroy(struct device_node *port_dn) { - struct phy_device *phydev; - - if (of_phy_is_fixed_link(port_dn)) { - phydev = of_phy_find_device(port_dn); - if (phydev) { - phy_device_free(phydev); - fixed_phy_unregister(phydev); - } - } + if (of_phy_is_fixed_link(port_dn)) + of_phy_deregister_fixed_link(port_dn); } static void dsa_switch_destroy(struct dsa_switch *ds) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index f8a7d9aab437..5fff951a0a49 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -28,8 +28,10 @@ static struct dsa_switch_tree *dsa_get_dst(u32 tree) struct dsa_switch_tree *dst; list_for_each_entry(dst, &dsa_switch_trees, list) - if (dst->tree == tree) + if (dst->tree == tree) { + kref_get(&dst->refcount); return dst; + } return NULL; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6b1282c006b1..30e2e21d7619 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1125,7 +1125,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, p->phy_interface = mode; phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); - if (of_phy_is_fixed_link(port_dn)) { + if (!phy_dn && of_phy_is_fixed_link(port_dn)) { /* In the case of a fixed PHY, the DT node associated * to the fixed PHY is the Port DT node */ @@ -1135,7 +1135,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, return ret; } phy_is_fixed = true; - phy_dn = port_dn; + phy_dn = of_node_get(port_dn); } if (ds->ops->get_phy_flags) @@ -1154,6 +1154,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, ret = dsa_slave_phy_connect(p, slave_dev, phy_id); if (ret) { netdev_err(slave_dev, "failed to connect to phy%d: %d\n", phy_id, ret); + of_node_put(phy_dn); return ret; } } else { @@ -1162,6 +1163,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, phy_flags, p->phy_interface); } + + of_node_put(phy_dn); } if (p->phy && phy_is_fixed) @@ -1174,6 +1177,8 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p, ret = dsa_slave_phy_connect(p, slave_dev, p->port); if (ret) { netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret); + if (phy_is_fixed) + of_phy_deregister_fixed_link(port_dn); return ret; } } @@ -1289,10 +1294,18 @@ int 
dsa_slave_create(struct dsa_switch *ds, struct device *parent, void dsa_slave_destroy(struct net_device *slave_dev) { struct dsa_slave_priv *p = netdev_priv(slave_dev); + struct dsa_switch *ds = p->parent; + struct device_node *port_dn; + + port_dn = ds->ports[p->port].dn; netif_carrier_off(slave_dev); - if (p->phy) + if (p->phy) { phy_disconnect(p->phy); + + if (of_phy_is_fixed_link(port_dn)) + of_phy_deregister_fixed_link(port_dn); + } unregister_netdev(slave_dev); free_netdev(slave_dev); } diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 300b06888fdf..b54b3ca939db 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -715,6 +715,7 @@ config DEFAULT_TCP_CONG default "reno" if DEFAULT_RENO default "dctcp" if DEFAULT_DCTCP default "cdg" if DEFAULT_CDG + default "bbr" if DEFAULT_BBR default "cubic" config TCP_MD5SIG diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9648c97e541f..215143246e4b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect); static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) { - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending += writebias; /* Basic assumption: if someone sets sk->sk_err, he _must_ @@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) */ while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { release_sock(sk); - timeo = schedule_timeout(timeo); + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); lock_sock(sk); if (signal_pending(current) || !timeo) break; - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } - finish_wait(sk_sleep(sk), &wait); + remove_wait_queue(sk_sleep(sk), &wait); sk->sk_write_pending -= writebias; return timeo; } @@ -1234,7 +1233,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); /* fixed ID is invalid if DF bit is not set */ - if (fixedid && !(iph->frag_off & htons(IP_DF))) + if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF))) goto out; } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index d95631d09248..20fb25e3027b 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) esph = (void *)skb_push(skb, 4); *seqhi = esph->spi; esph->spi = esph->seq_no; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; aead_request_set_callback(req, 0, esp_input_done_esn, skb); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c3b80478226e..161fc0f0d752 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old, int fib_unmerge(struct net *net) { - struct fib_table *old, *new; + struct fib_table *old, *new, *main_table; /* attempt to fetch local table if it has been allocated */ old = fib_get_table(net, RT_TABLE_LOCAL); @@ -162,11 +162,21 @@ int fib_unmerge(struct net *net) if (!new) return -ENOMEM; + /* table is already unmerged */ + if (new == old) + return 0; + /* replace merged table with clean table */ - if (new != old) { - fib_replace_table(net, old, new); - fib_free_table(old); - } + fib_replace_table(net, old, new); + fib_free_table(old); + + /* attempt to fetch main table if it has been allocated */ + main_table = 
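
/* Editor's sketch, not part of the patch: the generic wait_woken() pattern
 * that inet_wait_for_connect() is converted to above.  Unlike the old
 * prepare_to_wait()/schedule_timeout() loop, wait_woken() cannot miss a
 * wake-up arriving between the condition check and the sleep.  The "ready"
 * callback is a stand-in for the caller's condition; the real code also
 * drops and re-takes the socket lock around the wait.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <net/sock.h>

static long example_wait(struct sock *sk, long timeo,
                         bool (*ready)(struct sock *sk))
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);

        add_wait_queue(sk_sleep(sk), &wait);
        while (!ready(sk) && timeo && !signal_pending(current))
                timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
        remove_wait_queue(sk_sleep(sk), &wait);

        return timeo;
}
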
fib_get_table(net, RT_TABLE_MAIN); + if (!main_table) + return 0; + + /* flush local entries from main table */ + fib_table_flush_external(main_table); return 0; } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 31cef3602585..026f309c51e9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) local_l = fib_find_node(lt, &local_tp, l->key); if (fib_insert_alias(lt, local_tp, local_l, new_fa, - NULL, l->key)) + NULL, l->key)) { + kmem_cache_free(fn_alias_kmem, new_fa); goto out; + } } /* stop loop if key wrapped back to 0 */ @@ -1760,6 +1762,71 @@ out: return NULL; } +/* Caller must hold RTNL */ +void fib_table_flush_external(struct fib_table *tb) +{ + struct trie *t = (struct trie *)tb->tb_data; + struct key_vector *pn = t->kv; + unsigned long cindex = 1; + struct hlist_node *tmp; + struct fib_alias *fa; + + /* walk trie in reverse order */ + for (;;) { + unsigned char slen = 0; + struct key_vector *n; + + if (!(cindex--)) { + t_key pkey = pn->key; + + /* cannot resize the trie vector */ + if (IS_TRIE(pn)) + break; + + /* resize completed node */ + pn = resize(t, pn); + cindex = get_index(pkey, pn); + + continue; + } + + /* grab the next available node */ + n = get_child(pn, cindex); + if (!n) + continue; + + if (IS_TNODE(n)) { + /* record pn and cindex for leaf walking */ + pn = n; + cindex = 1ul << n->bits; + + continue; + } + + hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { + /* if alias was cloned to local then we just + * need to remove the local copy from main + */ + if (tb->tb_id != fa->tb_id) { + hlist_del_rcu(&fa->fa_list); + alias_free_mem_rcu(fa); + continue; + } + + /* record local slen */ + slen = fa->fa_slen; + } + + /* update leaf slen */ + n->slen = slen; + + if (hlist_empty(&n->leaf)) { + put_child_root(pn, n->key, NULL); + node_free(n); + } + } +} + /* Caller must hold RTNL. 
*/ int fib_table_flush(struct net *net, struct fib_table *tb) { @@ -2413,22 +2480,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, struct key_vector *l, **tp = &iter->tnode; t_key key; - /* use cache location of next-to-find key */ + /* use cached location of previously found key */ if (iter->pos > 0 && pos >= iter->pos) { - pos -= iter->pos; key = iter->key; } else { - iter->pos = 0; + iter->pos = 1; key = 0; } - while ((l = leaf_walk_rcu(tp, key)) != NULL) { + pos -= iter->pos; + + while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { key = l->key + 1; iter->pos++; - - if (--pos <= 0) - break; - l = NULL; /* handle unlikely case of a key wrap */ @@ -2437,7 +2501,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, } if (l) - iter->key = key; /* remember it */ + iter->key = l->key; /* remember it */ else iter->pos = 0; /* forget it */ @@ -2465,7 +2529,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) return fib_route_get_idx(iter, *pos); iter->pos = 0; - iter->key = 0; + iter->key = KEY_MAX; return SEQ_START_TOKEN; } @@ -2474,7 +2538,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct fib_route_iter *iter = seq->private; struct key_vector *l = NULL; - t_key key = iter->key; + t_key key = iter->key + 1; ++*pos; @@ -2483,7 +2547,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) l = leaf_walk_rcu(&iter->tnode, key); if (l) { - iter->key = l->key + 1; + iter->key = l->key; iter->pos++; } else { iter->pos = 0; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 38abe70e595f..48734ee6293f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; - fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); + fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); rt = __ip_route_output_key_hash(net, fl4, @@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - if (inet_addr_type_dev_table(net, skb_in->dev, + if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, fl4_dec.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4_dec); if (IS_ERR(rt2)) diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 606cc3e85d2b..15db786d50ed 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev) } static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); -static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); +static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im); static void igmpv3_clear_delrec(struct in_device *in_dev); static int sf_setstate(struct ip_mc_list *pmc); static void sf_markstate(struct ip_mc_list *pmc); @@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) spin_unlock_bh(&in_dev->mc_tomb_lock); } -static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) +/* + * restore ip_mc_list deleted records + */ +static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) { struct ip_mc_list *pmc, *pmc_prev; - struct ip_sf_list *psf, *psf_next; + struct ip_sf_list *psf; + struct net *net = dev_net(in_dev->dev); + __be32 multiaddr = im->multiaddr; 
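
/* Editor's note, illustrative only (not part of the patch): the
 * /proc/net/route iterator hunks above change what iter->key caches.
 * Previously it held the *next* key to look up (l->key + 1), which wraps to
 * 0 when the last leaf sits at KEY_MAX (e.g. a 255.255.255.255 host or
 * broadcast route), confusing the resume logic on the next read.  Now
 * iter->key records the key actually returned, the start token primes it
 * with KEY_MAX, and fib_route_seq_next() does the +1 together with an
 * explicit wrap check:
 *
 *        old:  iter->key = l->key + 1;    (0xffffffff + 1 wraps to 0)
 *        new:  iter->key = l->key;        (wrap handled when advancing)
 */
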
spin_lock_bh(&in_dev->mc_tomb_lock); pmc_prev = NULL; @@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) in_dev->mc_tomb = pmc->next; } spin_unlock_bh(&in_dev->mc_tomb_lock); + + spin_lock_bh(&im->lock); if (pmc) { - for (psf = pmc->tomb; psf; psf = psf_next) { - psf_next = psf->sf_next; - kfree(psf); + im->interface = pmc->interface; + im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; + im->sfmode = pmc->sfmode; + if (pmc->sfmode == MCAST_INCLUDE) { + im->tomb = pmc->tomb; + im->sources = pmc->sources; + for (psf = im->sources; psf; psf = psf->sf_next) + psf->sf_crcount = im->crcount; } in_dev_put(pmc->interface); - kfree(pmc); } + spin_unlock_bh(&im->lock); } +/* + * flush ip_mc_list deleted records + */ static void igmpv3_clear_delrec(struct in_device *in_dev) { struct ip_mc_list *pmc, *nextpmc; @@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) ip_mc_hash_add(in_dev, im); #ifdef CONFIG_IP_MULTICAST - igmpv3_del_delrec(in_dev, im->multiaddr); + igmpv3_del_delrec(in_dev, im); #endif igmp_group_added(im); if (!in_dev->dead) @@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev) ASSERT_RTNL(); - for_each_pmc_rtnl(in_dev, pmc) + for_each_pmc_rtnl(in_dev, pmc) { +#ifdef CONFIG_IP_MULTICAST + igmpv3_del_delrec(in_dev, pmc); +#endif igmp_group_added(pmc); + } } /* Device going down */ @@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev) in_dev->mr_gq_running = 0; if (del_timer(&in_dev->mr_gq_timer)) __in_dev_put(in_dev); - igmpv3_clear_delrec(in_dev); #endif ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); @@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev) #endif ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); - for_each_pmc_rtnl(in_dev, pmc) + for_each_pmc_rtnl(in_dev, pmc) { +#ifdef CONFIG_IP_MULTICAST + igmpv3_del_delrec(in_dev, pmc); +#endif igmp_group_added(pmc); + } } /* @@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev) /* Deactivate timers */ ip_mc_down(in_dev); +#ifdef CONFIG_IP_MULTICAST + igmpv3_clear_delrec(in_dev); +#endif while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { in_dev->mc_list = i->next_rcu; in_dev->mc_count--; - - /* We've dropped the groups in ip_mc_down already */ - ip_mc_clear_src(i); ip_ma_put(i); } } diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 8b4ffd216839..9f0a7b96646f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c @@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb) if (opt->is_strictroute && rt->rt_uses_gateway) goto sr_failed; - IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; + IPCB(skb)->flags |= IPSKB_FORWARDED; mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); if (ip_exceeds_mtu(skb, mtu)) { IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 03e7f7310423..877bdb02e887 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -107,6 +107,8 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) if (unlikely(!skb)) return 0; + skb->protocol = htons(ETH_P_IP); + return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); @@ -239,19 +241,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, struct sk_buff *segs; int ret = 0; - /* common case: fragmentation of segments is not allowed, - * or seglen is <= mtu + /* common case: seglen is <= mtu */ - if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || - skb_gso_validate_mtu(skb, mtu)) + if 
(skb_gso_validate_mtu(skb, mtu)) return ip_finish_output2(net, sk, skb); - /* Slowpath - GSO segment length is exceeding the dst MTU. + /* Slowpath - GSO segment length exceeds the egress MTU. * - * This can happen in two cases: - * 1) TCP GRO packet, DF bit not set - * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly - * from host network stack. + * This can happen in several cases: + * - Forwarding of a TCP GRO skb, when DF flag is not set. + * - Forwarding of an skb that arrived on a virtualization interface + * (virtio-net/vhost/tap) with TSO/GSO size set by other network + * stack. + * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an + * interface with a smaller MTU. + * - Arriving GRO skb (or GSO skb in a virtualized environment) that is + * bridged to a NETIF_F_TSO tunnel stacked over an interface with an + * insufficent MTU. */ features = netif_skb_features(skb); BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); @@ -1579,7 +1585,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, } oif = arg->bound_dev_if; - oif = oif ? : skb->skb_iif; + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) + oif = skb->skb_iif; flowi4_init_output(&fl4, oif, IP4_REPLY_MARK(net, skb->mark), diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 777bc1883870..fed3d29f9eb3 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, int pkt_len = skb->len - skb_inner_network_offset(skb); struct net *net = dev_net(rt->dst.dev); struct net_device *dev = skb->dev; - int skb_iif = skb->skb_iif; struct iphdr *iph; int err; @@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, skb_dst_set(skb, &rt->dst); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); - if (skb_iif && !(df & htons(IP_DF))) { - /* Arrived from an ingress interface, got encapsulated, with - * fragmentation of encapulating frames allowed. - * If skb is gso, the resulting encapsulated network segments - * may exceed dst mtu. - * Allow IP Fragmentation of segments. - */ - IPCB(skb)->flags |= IPSKB_FRAG_SEGS; - } - /* Push down and install the IP header. */ skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5f006e13de56..27089f5ebbb1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, vif->dev->stats.tx_bytes += skb->len; } - IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; + IPCB(skb)->flags |= IPSKB_FORWARDED; /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally * not only before forwarding, but after forwarding on all output diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index c3776ff6749f..b3cc1335adbc 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -24,10 +24,11 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; __u8 flags = skb->sk ? 
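
/* Editor's note, a worked example for the GSO slow path described above,
 * not part of the patch.  Numbers are illustrative:
 *
 *        GRO'd TCP skb, gso_size = 1448, forwarded out a link with MTU 1400
 *        -> skb_gso_validate_mtu() fails
 *        -> skb_gso_segment() rebuilds the individual ~1448-byte segments
 *        -> each segment goes through ip_fragment(), which either fragments
 *           it or, if DF is set, answers with ICMP_FRAG_NEEDED
 */
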
inet_sk_flowi_flags(skb->sk) : 0; + struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; if (addr_type == RTN_UNSPEC) - addr_type = inet_addr_type(net, saddr); + addr_type = inet_addr_type_dev_table(net, dev, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) flags |= FLOWI_FLAG_ANYSRC; else @@ -40,6 +41,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; + if (!fl4.flowi4_oif) + fl4.flowi4_oif = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; fl4.flowi4_flags = flags; rt = ip_route_output_key(net, &fl4); diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index b31df597fd37..697538464e6e 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1201,8 +1201,8 @@ static int translate_compat_table(struct xt_table_info **pinfo, newinfo->number = compatr->num_entries; for (i = 0; i < NF_ARP_NUMHOOKS; i++) { - newinfo->hook_entry[i] = info->hook_entry[i]; - newinfo->underflow[i] = info->underflow[i]; + newinfo->hook_entry[i] = compatr->hook_entry[i]; + newinfo->underflow[i] = compatr->underflow[i]; } entry1 = newinfo->entries; pos = entry1; diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bf855e64fc45..0c01a270bf9f 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c @@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr, struct in_addr gw = { .s_addr = (__force __be32)regs->data[priv->sreg_addr], }; - int oif = regs->data[priv->sreg_dev]; + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); } @@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) { struct nft_dup_ipv4 *priv = nft_expr_priv(expr); - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) + goto nla_put_failure; + if (priv->sreg_dev && nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) goto nla_put_failure; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 62d4d90c1389..2a57566e6e91 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow goto reject_redirect; } - n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); + n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); + if (!n) + n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); if (!IS_ERR(n)) { if (!(n->nud_state & NUD_VALID)) { neigh_event_send(n, NULL); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3251fe71f39f..814af89c1bd3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1164,7 +1164,7 @@ restart: err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) - goto out_err; + goto do_error; sg = !!(sk->sk_route_caps & NETIF_F_SG); @@ -1241,7 +1241,7 @@ new_segment: if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { - if (i == sysctl_max_skb_frags || !sg) { + if (i >= sysctl_max_skb_frags || !sg) { tcp_mark_push(tp, skb); goto new_segment; } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1294af4e0127..f9038d6b109e 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk, icsk->icsk_ca_ops = ca; icsk->icsk_ca_setsockopt = 1; - if (sk->sk_state != 
TCP_CLOSE) + if (sk->sk_state != TCP_CLOSE) { + memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); tcp_init_congestion_control(sk); + } } /* Manage refcounts on socket close. */ diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 10d728b6804c..ab37c6775630 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c @@ -56,6 +56,7 @@ struct dctcp { u32 next_seq; u32 ce_state; u32 delayed_ack_reserved; + u32 loss_cwnd; }; static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ @@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk) ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); ca->delayed_ack_reserved = 0; + ca->loss_cwnd = 0; ca->ce_state = 0; dctcp_reset(tp, ca); @@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk) static u32 dctcp_ssthresh(struct sock *sk) { - const struct dctcp *ca = inet_csk_ca(sk); + struct dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); + ca->loss_cwnd = tp->snd_cwnd; return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); } @@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, return 0; } +static u32 dctcp_cwnd_undo(struct sock *sk) +{ + const struct dctcp *ca = inet_csk_ca(sk); + + return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); +} + static struct tcp_congestion_ops dctcp __read_mostly = { .init = dctcp_init, .in_ack_event = dctcp_update_alpha, .cwnd_event = dctcp_cwnd_event, .ssthresh = dctcp_ssthresh, .cong_avoid = tcp_reno_cong_avoid, + .undo_cwnd = dctcp_cwnd_undo, .set_state = dctcp_state, .get_info = dctcp_get_info, .flags = TCP_CONG_NEEDS_ECN, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 61b7be303eec..2259114c7242 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(tcp_add_backlog); +int tcp_filter(struct sock *sk, struct sk_buff *skb) +{ + struct tcphdr *th = (struct tcphdr *)skb->data; + unsigned int eaten = skb->len; + int err; + + err = sk_filter_trim_cap(sk, skb, th->doff * 4); + if (!err) { + eaten -= skb->len; + TCP_SKB_CB(skb)->end_seq -= eaten; + } + return err; +} +EXPORT_SYMBOL(tcp_filter); + /* * From tcp_input.c */ @@ -1676,8 +1691,10 @@ process: nf_reset(skb); - if (sk_filter(sk, skb)) + if (tcp_filter(sk, skb)) goto discard_and_relse; + th = (const struct tcphdr *)skb->data; + iph = ip_hdr(skb); skb->dev = NULL; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d123d68f4d1d..5bab6c3f7a2f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1455,7 +1455,7 @@ static void udp_v4_rehash(struct sock *sk) udp_lib_rehash(sk, new_hash); } -static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; @@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & - udp_table.mask; - hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 7e0fe4bdd967..feb50a16398d 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int 
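
/* Editor's note, illustrative arithmetic for the new tcp_filter() above,
 * not part of the patch.  sk_filter_trim_cap() may shrink the skb but never
 * below the TCP header (th->doff * 4), so the sequence bookkeeping has to
 * shrink by the same amount:
 *
 *        skb->len = 1492 (32-byte header + 1460 bytes of payload)
 *        filter trims the skb to 132 bytes  ->  eaten = 1492 - 132 = 1360
 *        TCP_SKB_CB(skb)->end_seq -= 1360, leaving exactly the remaining
 *        100 payload bytes accounted for on the receive path.
 */
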
flags, int *addr_len); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); -int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); void udp_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index af817158d830..ff450c2aad9b 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -50,7 +50,7 @@ struct proto udplite_prot = { .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, - .backlog_rcv = udp_queue_rcv_skb, + .backlog_rcv = __udp_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .get_port = udp_v4_get_port, diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 060dd9922018..4bc5ba3ae452 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -183,7 +183,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, static void addrconf_dad_start(struct inet6_ifaddr *ifp); static void addrconf_dad_work(struct work_struct *w); -static void addrconf_dad_completed(struct inet6_ifaddr *ifp); +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); static void addrconf_dad_run(struct inet6_dev *idev); static void addrconf_rs_timer(unsigned long data); static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); @@ -2898,6 +2898,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, spin_lock_bh(&ifp->lock); ifp->flags &= ~IFA_F_TENTATIVE; spin_unlock_bh(&ifp->lock); + rt_genid_bump_ipv6(dev_net(idev->dev)); ipv6_ifa_notify(RTM_NEWADDR, ifp); in6_ifa_put(ifp); } @@ -3740,7 +3741,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net_device *dev = idev->dev; - bool notify = false; + bool bump_id, notify = false; addrconf_join_solict(dev, &ifp->addr); @@ -3755,11 +3756,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) idev->cnf.accept_dad < 1 || !(ifp->flags&IFA_F_TENTATIVE) || ifp->flags & IFA_F_NODAD) { + bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); - addrconf_dad_completed(ifp); + addrconf_dad_completed(ifp, bump_id); return; } @@ -3819,8 +3821,8 @@ static void addrconf_dad_work(struct work_struct *w) struct inet6_ifaddr, dad_work); struct inet6_dev *idev = ifp->idev; + bool bump_id, disable_ipv6 = false; struct in6_addr mcaddr; - bool disable_ipv6 = false; enum { DAD_PROCESS, @@ -3890,11 +3892,12 @@ static void addrconf_dad_work(struct work_struct *w) * DAD was successful */ + bump_id = ifp->flags & IFA_F_TENTATIVE; ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); spin_unlock(&ifp->lock); write_unlock_bh(&idev->lock); - addrconf_dad_completed(ifp); + addrconf_dad_completed(ifp, bump_id); goto out; } @@ -3931,7 +3934,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) return true; } -static void addrconf_dad_completed(struct inet6_ifaddr *ifp) +static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) { struct net_device *dev = ifp->idev->dev; struct in6_addr lladdr; @@ -3983,6 +3986,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) spin_unlock(&ifp->lock); write_unlock_bh(&ifp->idev->lock); } + + if (bump_id) + rt_genid_bump_ipv6(dev_net(dev)); } static void addrconf_dad_run(struct inet6_dev *idev) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 
37874e2f30ed..ccf40550c475 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -139,7 +139,8 @@ void ip6_datagram_release_cb(struct sock *sk) } EXPORT_SYMBOL_GPL(ip6_datagram_release_cb); -static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, + int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); @@ -252,6 +253,7 @@ ipv4_connected: out: return err; } +EXPORT_SYMBOL_GPL(__ip6_datagram_connect); int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 060a60b2f8a6..111ba55fd512 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) esph = (void *)skb_push(skb, 4); *seqhi = esph->spi; esph->spi = esph->seq_no; - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi); + esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi; aead_request_set_callback(req, 0, esp_input_done_esn, skb); } diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index bd59c343d35f..2772004ba5a1 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -447,8 +447,10 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, if (__ipv6_addr_needs_scope_id(addr_type)) iif = skb->dev->ifindex; - else - iif = l3mdev_master_ifindex(skb->dev); + else { + dst = skb_dst(skb); + iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev); + } /* * Must not send error if the source does not uniquely diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1fcf61f1cbc3..89c59e656f44 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -99,7 +99,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, segs = ops->callbacks.gso_segment(skb, features); } - if (IS_ERR(segs)) + if (IS_ERR_OR_NULL(segs)) goto out; gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6001e781164e..59eb4ed99ce8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1366,7 +1366,7 @@ emsgsize: if (((length > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, exthdrlen, diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 87784560dc46..d76674efe523 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, int mtu; unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; unsigned int max_headroom = psh_hlen; + bool use_cache = false; u8 hop_limit; int err = -1; @@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); neigh_release(neigh); - } else if (!fl6->flowi6_mark) + } else if (!(t->parms.flags & + (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { + /* enable the cache only only if the routing decision does + * not depend on the current inner header value + */ + use_cache = true; + } + + if (use_cache) dst = dst_cache_get(&t->dst_cache); if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) @@ -1150,7 +1159,7 @@ route_lookup: if 
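
/* Editor's sketch, not part of the patch: the dst_cache discipline the
 * ip6_tnl_xmit() hunks above and below move to.  A cached route is only
 * valid when the routing decision cannot depend on per-packet state (here:
 * inherited traffic class or fwmark).  Function and variable names are
 * illustrative.
 */
#include <net/dst_cache.h>
#include <net/ip6_route.h>
#include <net/ip6_tunnel.h>

static struct dst_entry *example_tnl_route(struct ip6_tnl *t,
                                           struct flowi6 *fl6,
                                           bool per_packet_route)
{
        struct dst_entry *dst = NULL;

        if (!per_packet_route)
                dst = dst_cache_get(&t->dst_cache);
        if (dst)
                return dst;

        dst = ip6_route_output(t->net, NULL, fl6);
        if (dst->error) {
                dst_release(dst);
                return NULL;
        }
        if (!per_packet_route)
                dst_cache_set_ip6(&t->dst_cache, dst, &fl6->saddr);
        return dst;
}
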
(t->encap.type != TUNNEL_ENCAP_NONE) goto tx_err_dst_release; } else { - if (!fl6->flowi6_mark && ndst) + if (use_cache && ndst) dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); } skb_dst_set(skb, dst); @@ -1172,7 +1181,6 @@ route_lookup: if (err) return err; - skb->protocol = htons(ETH_P_IPV6); skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index a7520528ecd2..b283f293ee4a 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, uh->len = htons(skb->len); - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); - IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED - | IPSKB_REROUTED); skb_dst_set(skb, dst); udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 8a02ca8a11af..c299c1e2bbf0 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1138,6 +1138,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { .priority = 100, }; +static bool is_vti6_tunnel(const struct net_device *dev) +{ + return dev->netdev_ops == &vti6_netdev_ops; +} + +static int vti6_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct ip6_tnl *t = netdev_priv(dev); + + if (!is_vti6_tunnel(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + if (!net_eq(t->net, dev_net(dev))) + xfrm_garbage_collect(t->net); + break; + } + return NOTIFY_DONE; +} + +static struct notifier_block vti6_notifier_block __read_mostly = { + .notifier_call = vti6_device_event, +}; + /** * vti6_tunnel_init - register protocol and reserve needed resources * @@ -1148,6 +1175,8 @@ static int __init vti6_tunnel_init(void) const char *msg; int err; + register_netdevice_notifier(&vti6_notifier_block); + msg = "tunnel device"; err = register_pernet_device(&vti6_net_ops); if (err < 0) @@ -1180,6 +1209,7 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti6_net_ops); pernet_dev_failed: + unregister_netdevice_notifier(&vti6_notifier_block); pr_err("vti6 init: failed to register %s\n", msg); return err; } @@ -1194,6 +1224,7 @@ static void __exit vti6_tunnel_cleanup(void) xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); unregister_pernet_device(&vti6_net_ops); + unregister_netdevice_notifier(&vti6_notifier_block); } module_init(vti6_tunnel_init); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index e4347aeb2e65..9948b5ce52da 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -576,11 +576,11 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user) /* Jumbo payload inhibits frag. 
header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); - return -EINVAL; + return 0; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) - return -EINVAL; + return 0; if (!pskb_may_pull(skb, fhoff + sizeof(*fhdr))) return -ENOMEM; diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index f7aab5ab93a5..f06b0471f39f 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -69,7 +69,7 @@ static unsigned int ipv6_defrag(void *priv, if (err == -EINPROGRESS) return NF_STOLEN; - return NF_ACCEPT; + return err == 0 ? NF_ACCEPT : NF_DROP; } static struct nf_hook_ops ipv6_defrag_ops[] = { diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index a5400223fd74..10090400c72f 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -156,6 +156,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) fl6.daddr = oip6h->saddr; fl6.fl6_sport = otcph->dest; fl6.fl6_dport = otcph->source; + fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); dst = ip6_route_output(net, NULL, &fl6); if (dst->error) { diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 8bfd470cbe72..831f86e1ec08 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c @@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr, { struct nft_dup_ipv6 *priv = nft_expr_priv(expr); struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; - int oif = regs->data[priv->sreg_dev]; + int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); } @@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr) { struct nft_dup_ipv6 *priv = nft_expr_priv(expr); - if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || + if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) + goto nla_put_failure; + if (priv->sreg_dev && nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) goto nla_put_failure; diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 7cca8ac66fe9..cd4252346a32 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -155,6 +155,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) if (unlikely(!skb)) return 0; + skb->protocol = htons(ETH_P_IPV6); + return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 947ed1ded026..1b57e11e6e0d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, if (rt6->rt6i_flags & RTF_LOCAL) return; + if (dst_metric_locked(dst, RTAX_MTU)) + return; + dst_confirm(dst); mtu = max_t(u32, mtu, IPV6_MIN_MTU); if (mtu >= dst_mtu(dst)) @@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) PMTU discouvery. 
*/ if (rt->dst.dev == arg->dev && + dst_metric_raw(&rt->dst, RTAX_MTU) && !dst_metric_locked(&rt->dst, RTAX_MTU)) { if (rt->rt6i_flags & RTF_CACHE) { /* For RTF_CACHE with rt6i_pmtu == 0 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5a27ab4eab39..b9f1fee9a886 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); - else - fl6.flowi6_oif = oif ? : skb->skb_iif; + else { + if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) + oif = skb->skb_iif; + + fl6.flowi6_oif = oif; + } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); fl6.fl6_dport = t1->dest; @@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); - if (sk_filter(sk, skb)) + if (tcp_filter(sk, skb)) goto discard; /* @@ -1453,8 +1457,10 @@ process: if (tcp_v6_inbound_md5_hash(sk, skb)) goto discard_and_relse; - if (sk_filter(sk, skb)) + if (tcp_filter(sk, skb)) goto discard_and_relse; + th = (const struct tcphdr *)skb->data; + hdr = ipv6_hdr(skb); skb->dev = NULL; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b2ef061e6836..e4a8000d59ad 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -514,7 +514,7 @@ out: return; } -static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; @@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & - udp_table.mask; - hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; + udptable->mask; + hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; start_lookup: - hslot = &udp_table.hash2[hash2]; + hslot = &udptable->hash2[hash2]; offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); } diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index f6eb1ab34f4b..e78bdc76dcc3 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); -int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); void udpv6_destroy_sock(struct sock *sk); #ifdef CONFIG_PROC_FS diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 47d0d2b87106..2f5101a12283 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -45,7 +45,7 @@ struct proto udplitev6_prot = { .getsockopt = udpv6_getsockopt, .sendmsg = udpv6_sendmsg, .recvmsg = udpv6_recvmsg, - .backlog_rcv = udpv6_queue_rcv_skb, + .backlog_rcv = __udpv6_queue_rcv_skb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .get_port = udp_v6_get_port, diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index fce25afb652a..8938b6ba57a0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -61,7 +61,8 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && - !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + (!sk->sk_bound_dev_if || !dif || + 
sk->sk_bound_dev_if == dif)) goto found; } @@ -182,15 +183,17 @@ pass_up: struct iphdr *iph = (struct iphdr *) skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); - sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id); + sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb), + tunnel_id); + if (!sk) { + read_unlock_bh(&l2tp_ip_lock); + goto discard; + } + + sock_hold(sk); read_unlock_bh(&l2tp_ip_lock); } - if (sk == NULL) - goto discard; - - sock_hold(sk); - if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; @@ -251,22 +254,17 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) int ret; int chk_addr_ret; - if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; if (addr_len < sizeof(struct sockaddr_l2tpip)) return -EINVAL; if (addr->l2tp_family != AF_INET) return -EINVAL; - ret = -EADDRINUSE; - read_lock_bh(&l2tp_ip_lock); - if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, - sk->sk_bound_dev_if, addr->l2tp_conn_id)) - goto out_in_use; - - read_unlock_bh(&l2tp_ip_lock); - lock_sock(sk); + + ret = -EINVAL; + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out; + if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) goto out; @@ -280,25 +278,28 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ - sk_dst_reset(sk); - - l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); + if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, + sk->sk_bound_dev_if, addr->l2tp_conn_id)) { + write_unlock_bh(&l2tp_ip_lock); + ret = -EADDRINUSE; + goto out; + } + + sk_dst_reset(sk); + l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id; + sk_add_bind_node(sk, &l2tp_ip_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); + ret = 0; sock_reset_flag(sk, SOCK_ZAPPED); out: release_sock(sk); - return ret; - -out_in_use: - read_unlock_bh(&l2tp_ip_lock); - return ret; } @@ -307,21 +308,24 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; int rc; - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ - return -EINVAL; - if (addr_len < sizeof(*lsa)) return -EINVAL; if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) return -EINVAL; - rc = ip4_datagram_connect(sk, uaddr, addr_len); - if (rc < 0) - return rc; - lock_sock(sk); + /* Must bind first - autobinding does not work */ + if (sock_flag(sk, SOCK_ZAPPED)) { + rc = -EINVAL; + goto out_sk; + } + + rc = __ip4_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + goto out_sk; + l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip_lock); @@ -329,7 +333,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len sk_add_bind_node(sk, &l2tp_ip_bind_table); write_unlock_bh(&l2tp_ip_lock); +out_sk: release_sock(sk); + return rc; } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index ad3468c32b53..aa821cb639e5 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -72,8 +72,9 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net, if ((l2tp->conn_id == tunnel_id) && net_eq(sock_net(sk), net) && - !(addr && ipv6_addr_equal(addr, laddr)) && - !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + (!addr || ipv6_addr_equal(addr, laddr)) && + (!sk->sk_bound_dev_if || !dif || + 
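
/* Editor's sketch, not part of the patch: the locking order the l2tp_ip
 * bind path above moves to.  The "address in use" lookup and the insertion
 * into the bind table now happen inside one write_lock_bh() section, so two
 * sockets racing to bind the same connection id can no longer both pass the
 * check.  The helper is illustrative and assumes it sits inside
 * net/l2tp/l2tp_ip.c, where l2tp_ip_lock, l2tp_ip_bind_table and
 * __l2tp_ip_bind_lookup() are defined.
 */
static int example_l2tp_bind_insert(struct sock *sk, struct net *net,
                                    __be32 laddr, int dif, u32 conn_id)
{
        int ret = 0;

        write_lock_bh(&l2tp_ip_lock);
        if (__l2tp_ip_bind_lookup(net, laddr, dif, conn_id)) {
                ret = -EADDRINUSE;        /* lost the race */
        } else {
                sk_add_bind_node(sk, &l2tp_ip_bind_table);
                sk_del_node_init(sk);
        }
        write_unlock_bh(&l2tp_ip_lock);

        return ret;
}
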
sk->sk_bound_dev_if == dif)) goto found; } @@ -196,16 +197,17 @@ pass_up: struct ipv6hdr *iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); - sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, - 0, tunnel_id); + sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb), + tunnel_id); + if (!sk) { + read_unlock_bh(&l2tp_ip6_lock); + goto discard; + } + + sock_hold(sk); read_unlock_bh(&l2tp_ip6_lock); } - if (sk == NULL) - goto discard; - - sock_hold(sk); - if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; @@ -266,11 +268,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; struct net *net = sock_net(sk); __be32 v4addr = 0; + int bound_dev_if; int addr_type; int err; - if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; if (addr->l2tp_family != AF_INET6) return -EINVAL; if (addr_len < sizeof(*addr)) @@ -286,41 +287,34 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (addr_type & IPV6_ADDR_MULTICAST) return -EADDRNOTAVAIL; - err = -EADDRINUSE; - read_lock_bh(&l2tp_ip6_lock); - if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, - sk->sk_bound_dev_if, addr->l2tp_conn_id)) - goto out_in_use; - read_unlock_bh(&l2tp_ip6_lock); - lock_sock(sk); err = -EINVAL; + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out_unlock; + if (sk->sk_state != TCP_CLOSE) goto out_unlock; + bound_dev_if = sk->sk_bound_dev_if; + /* Check if the address belongs to the host. */ rcu_read_lock(); if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; if (addr_type & IPV6_ADDR_LINKLOCAL) { - if (addr_len >= sizeof(struct sockaddr_in6) && - addr->l2tp_scope_id) { - /* Override any existing binding, if another - * one is supplied by user. - */ - sk->sk_bound_dev_if = addr->l2tp_scope_id; - } + if (addr->l2tp_scope_id) + bound_dev_if = addr->l2tp_scope_id; /* Binding to link-local address requires an - interface */ - if (!sk->sk_bound_dev_if) + * interface. 
+ */ + if (!bound_dev_if) goto out_unlock_rcu; err = -ENODEV; - dev = dev_get_by_index_rcu(sock_net(sk), - sk->sk_bound_dev_if); + dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if); if (!dev) goto out_unlock_rcu; } @@ -335,13 +329,22 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) } rcu_read_unlock(); - inet->inet_rcv_saddr = inet->inet_saddr = v4addr; + write_lock_bh(&l2tp_ip6_lock); + if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if, + addr->l2tp_conn_id)) { + write_unlock_bh(&l2tp_ip6_lock); + err = -EADDRINUSE; + goto out_unlock; + } + + inet->inet_saddr = v4addr; + inet->inet_rcv_saddr = v4addr; + sk->sk_bound_dev_if = bound_dev_if; sk->sk_v6_rcv_saddr = addr->l2tp_addr; np->saddr = addr->l2tp_addr; l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; - write_lock_bh(&l2tp_ip6_lock); sk_add_bind_node(sk, &l2tp_ip6_bind_table); sk_del_node_init(sk); write_unlock_bh(&l2tp_ip6_lock); @@ -354,10 +357,7 @@ out_unlock_rcu: rcu_read_unlock(); out_unlock: release_sock(sk); - return err; -out_in_use: - read_unlock_bh(&l2tp_ip6_lock); return err; } @@ -370,9 +370,6 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_type; int rc; - if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */ - return -EINVAL; - if (addr_len < sizeof(*lsa)) return -EINVAL; @@ -389,10 +386,18 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, return -EINVAL; } - rc = ip6_datagram_connect(sk, uaddr, addr_len); - lock_sock(sk); + /* Must bind first - autobinding does not work */ + if (sock_flag(sk, SOCK_ZAPPED)) { + rc = -EINVAL; + goto out_sk; + } + + rc = __ip6_datagram_connect(sk, uaddr, addr_len); + if (rc < 0) + goto out_sk; + l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; write_lock_bh(&l2tp_ip6_lock); @@ -400,6 +405,7 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, sk_add_bind_node(sk, &l2tp_ip6_bind_table); write_unlock_bh(&l2tp_ip6_lock); +out_sk: release_sock(sk); return rc; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 78e9ecbc96e6..8e05032689f0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) } /* No need to do anything if the driver does all */ - if (!local->ops->set_tim) + if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) return; if (sta->dead) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1c56abc49627..bd5f4be89435 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, struct sta_info *sta, struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct fq *fq = &local->fq; struct ieee80211_vif *vif; struct txq_info *txqi; @@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, if (!txqi) return false; - info->control.vif = vif; - spin_lock_bh(&fq->lock); ieee80211_txq_enqueue(local, txqi, skb); spin_unlock_bh(&fq->lock); @@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; - *ieee80211_get_qos_ctl(hdr) = tid; hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); } else { info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; @@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, (tid_tx ? 
IEEE80211_TX_CTL_AMPDU : 0); info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; + if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { + tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; + *ieee80211_get_qos_ctl(hdr) = tid; + } + __skb_queue_head_init(&tx.skbs); tx.flags = IEEE80211_TX_UNICAST; @@ -3426,6 +3427,11 @@ begin: goto begin; } + if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) + info->flags |= IEEE80211_TX_CTL_AMPDU; + else + info->flags &= ~IEEE80211_TX_CTL_AMPDU; + if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { struct sta_info *sta = container_of(txq->sta, struct sta_info, sta); diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index ee715764a828..6832bf6ab69f 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); } + /* + * This is a workaround for VHT-enabled STAs which break the spec + * and have the VHT-MCS Rx map filled in with value 3 for all eight + * spatial streams, an example is AR9462. + * + * As per spec, in section 22.1.1 Introduction to the VHT PHY + * A VHT STA shall support at least single spatial stream VHT-MCSs + * 0 to 7 (transmit and receive) in all supported channel widths. + */ + if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { + vht_cap->vht_supported = false; + sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n", + sta->addr); + return; + } + /* finally set up the bandwidth */ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c3c809b2e712..a6e44ef2ec9a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = { .hdrsize = 0, .name = IPVS_GENL_NAME, .version = IPVS_GENL_VERSION, - .maxattr = IPVS_CMD_MAX, + .maxattr = IPVS_CMD_ATTR_MAX, .netnsok = true, /* Make ipvsadm to work on netns */ }; diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 1b07578bedf3..9350530c16c1 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -283,6 +283,7 @@ struct ip_vs_sync_buff { */ static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) { + memset(ho, 0, sizeof(*ho)); ho->init_seq = get_unaligned_be32(&no->init_seq); ho->delta = get_unaligned_be32(&no->delta); ho->previous_delta = get_unaligned_be32(&no->previous_delta); @@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa kfree(param->pe_data); } - if (opt) - memcpy(&cp->in_seq, opt, sizeof(*opt)); + if (opt) { + cp->in_seq = opt->in_seq; + cp->out_seq = opt->out_seq; + } atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); cp->state = state; cp->old_state = cp->state; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index df2f5a3901df..0f87e5d21be7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -76,6 +76,7 @@ struct conntrack_gc_work { struct delayed_work dwork; u32 last_bucket; bool exiting; + long next_gc_run; }; static __read_mostly struct kmem_cache *nf_conntrack_cachep; @@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock; static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); static __read_mostly bool nf_conntrack_locks_all; +/*
every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of the table */ #define GC_MAX_BUCKETS_DIV 64u -#define GC_MAX_BUCKETS 8192u -#define GC_INTERVAL (5 * HZ) +/* upper bound of scan intervals */ +#define GC_INTERVAL_MAX (2 * HZ) +/* maximum conntracks to evict per gc run */ #define GC_MAX_EVICTS 256u static struct conntrack_gc_work conntrack_gc_work; @@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash) static void gc_worker(struct work_struct *work) { unsigned int i, goal, buckets = 0, expired_count = 0; - unsigned long next_run = GC_INTERVAL; - unsigned int ratio, scanned = 0; struct conntrack_gc_work *gc_work; + unsigned int ratio, scanned = 0; + unsigned long next_run; gc_work = container_of(work, struct conntrack_gc_work, dwork.work); - goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); + goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; i = gc_work->last_bucket; do { @@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work) if (gc_work->exiting) return; + /* + * Eviction will normally happen from the packet path, and not + * from this gc worker. + * + * This worker is only here to reap expired entries when the system went + * idle after a busy period. + * + * The heuristics below are supposed to balance conflicting goals: + * + * 1. Minimize time until we notice a stale entry + * 2. Maximize scan intervals to not waste cycles + * + * Normally, expired_count will be 0; this increases the next_run time + * to prioritize 2) above. + * + * As soon as a timed-out entry is found, move towards 1) and increase + * the scan frequency. + * In case we have lots of evictions, the next scan is done immediately. + */ ratio = scanned ? expired_count * 100 / scanned : 0; - if (ratio >= 90 || expired_count == GC_MAX_EVICTS) + if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { + gc_work->next_gc_run = 0; next_run = 0; + } else if (expired_count) { + gc_work->next_gc_run /= 2U; + next_run = msecs_to_jiffies(1); + } else { + if (gc_work->next_gc_run < GC_INTERVAL_MAX) + gc_work->next_gc_run += msecs_to_jiffies(1); + + next_run = gc_work->next_gc_run; + } gc_work->last_bucket = i; - schedule_delayed_work(&gc_work->dwork, next_run); + queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); } static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) { INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); + gc_work->next_gc_run = GC_INTERVAL_MAX; gc_work->exiting = false; } @@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void) nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); conntrack_gc_work_init(&conntrack_gc_work); - schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); + queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); return 0; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 336e21559e01..7341adf7059d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) for (i = 0; i < nf_ct_helper_hsize; i++) { hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { - if (!strcmp(h->name, name) && - h->tuple.src.l3num == l3num && - h->tuple.dst.protonum == protonum) + if (strcmp(h->name, name)) + continue; + + if (h->tuple.src.l3num != NFPROTO_UNSPEC && + h->tuple.src.l3num != l3num) + continue; + + if (h->tuple.dst.protonum == protonum) return h; } } diff --git a/net/netfilter/nf_conntrack_sip.c
b/net/netfilter/nf_conntrack_sip.c index 621b81c7bddc..c3fc14e021ec 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, handler = &sip_handlers[i]; if (handler->request == NULL) continue; - if (*datalen < handler->len || + if (*datalen < handler->len + 2 || strncasecmp(*dptr, handler->method, handler->len)) continue; + if ((*dptr)[handler->len] != ' ' || + !isalpha((*dptr)[handler->len+1])) + continue; if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, &matchoff, &matchlen) <= 0) { diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index bbb8f3df79f7..5b9c884a452e 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -42,7 +42,7 @@ struct nf_nat_conn_key { const struct nf_conntrack_zone *zone; }; -static struct rhashtable nf_nat_bysource_table; +static struct rhltable nf_nat_bysource_table; inline const struct nf_nat_l3proto * __nf_nat_l3proto_find(u8 family) @@ -193,9 +193,12 @@ static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg, const struct nf_nat_conn_key *key = arg->key; const struct nf_conn *ct = obj; - return same_src(ct, key->tuple) && - net_eq(nf_ct_net(ct), key->net) && - nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL); + if (!same_src(ct, key->tuple) || + !net_eq(nf_ct_net(ct), key->net) || + !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL)) + return 1; + + return 0; } static struct rhashtable_params nf_nat_bysource_params = { @@ -204,7 +207,6 @@ static struct rhashtable_params nf_nat_bysource_params = { .obj_cmpfn = nf_nat_bysource_cmp, .nelem_hint = 256, .min_size = 1024, - .nulls_base = (1U << RHT_BASE_SHIFT), }; /* Only called for SRC manip */ @@ -223,12 +225,15 @@ find_appropriate_src(struct net *net, .tuple = tuple, .zone = zone }; + struct rhlist_head *hl; - ct = rhashtable_lookup_fast(&nf_nat_bysource_table, &key, - nf_nat_bysource_params); - if (!ct) + hl = rhltable_lookup(&nf_nat_bysource_table, &key, + nf_nat_bysource_params); + if (!hl) return 0; + ct = container_of(hl, typeof(*ct), nat_bysource); + nf_ct_invert_tuplepr(result, &ct->tuplehash[IP_CT_DIR_REPLY].tuple); result->dst = tuple->dst; @@ -446,11 +451,17 @@ nf_nat_setup_info(struct nf_conn *ct, } if (maniptype == NF_NAT_MANIP_SRC) { + struct nf_nat_conn_key key = { + .net = nf_ct_net(ct), + .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, + .zone = nf_ct_zone(ct), + }; int err; - err = rhashtable_insert_fast(&nf_nat_bysource_table, - &ct->nat_bysource, - nf_nat_bysource_params); + err = rhltable_insert_key(&nf_nat_bysource_table, + &key, + &ct->nat_bysource, + nf_nat_bysource_params); if (err) return NF_DROP; } @@ -567,8 +578,8 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data) * will delete entry from already-freed table. */ ct->status &= ~IPS_NAT_DONE_MASK; - rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, - nf_nat_bysource_params); + rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, + nf_nat_bysource_params); /* don't delete conntrack. Although that would make things a lot * simpler, we'd end up flushing all conntracks on nat rmmod. 
@@ -698,8 +709,8 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct) if (!nat) return; - rhashtable_remove_fast(&nf_nat_bysource_table, &ct->nat_bysource, - nf_nat_bysource_params); + rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, + nf_nat_bysource_params); } static struct nf_ct_ext_type nat_extend __read_mostly = { @@ -834,13 +845,13 @@ static int __init nf_nat_init(void) { int ret; - ret = rhashtable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); + ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params); if (ret) return ret; ret = nf_ct_extend_register(&nat_extend); if (ret < 0) { - rhashtable_destroy(&nf_nat_bysource_table); + rhltable_destroy(&nf_nat_bysource_table); printk(KERN_ERR "nf_nat_core: Unable to register extension\n"); return ret; } @@ -864,7 +875,7 @@ static int __init nf_nat_init(void) return 0; cleanup_extend: - rhashtable_destroy(&nf_nat_bysource_table); + rhltable_destroy(&nf_nat_bysource_table); nf_ct_extend_unregister(&nat_extend); return ret; } @@ -883,7 +894,7 @@ static void __exit nf_nat_cleanup(void) for (i = 0; i < NFPROTO_NUMPROTO; i++) kfree(nf_nat_l4protos[i]); - rhashtable_destroy(&nf_nat_bysource_table); + rhltable_destroy(&nf_nat_bysource_table); } MODULE_LICENSE("GPL"); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 24db22257586..e5194f6f906c 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2570,7 +2570,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, } if (set->timeout && - nla_put_be64(skb, NFTA_SET_TIMEOUT, cpu_to_be64(set->timeout), + nla_put_be64(skb, NFTA_SET_TIMEOUT, + cpu_to_be64(jiffies_to_msecs(set->timeout)), NFTA_SET_PAD)) goto nla_put_failure; if (set->gc_int && @@ -2859,7 +2860,8 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, if (nla[NFTA_SET_TIMEOUT] != NULL) { if (!(flags & NFT_SET_TIMEOUT)) return -EINVAL; - timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_TIMEOUT])); + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( + nla[NFTA_SET_TIMEOUT]))); } gc_int = 0; if (nla[NFTA_SET_GC_INTERVAL] != NULL) { @@ -2956,12 +2958,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err2; + goto err3; list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0; +err3: + ops->destroy(set); err2: kfree(set); err1: @@ -3176,7 +3180,8 @@ static int nf_tables_fill_setelem(struct sk_buff *skb, if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) && nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT, - cpu_to_be64(*nft_set_ext_timeout(ext)), + cpu_to_be64(jiffies_to_msecs( + *nft_set_ext_timeout(ext))), NFTA_SET_ELEM_PAD)) goto nla_put_failure; @@ -3445,21 +3450,22 @@ void *nft_set_elem_init(const struct nft_set *set, memcpy(nft_set_ext_data(ext), data, set->dlen); if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) *nft_set_ext_expiration(ext) = - jiffies + msecs_to_jiffies(timeout); + jiffies + timeout; if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) *nft_set_ext_timeout(ext) = timeout; return elem; } -void nft_set_elem_destroy(const struct nft_set *set, void *elem) +void nft_set_elem_destroy(const struct nft_set *set, void *elem, + bool destroy_expr) { struct nft_set_ext *ext = nft_set_elem_ext(set, elem); nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_uninit(nft_set_ext_data(ext), set->dtype); - if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) + if 
(destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); kfree(elem); @@ -3532,7 +3538,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) return -EINVAL; - timeout = be64_to_cpu(nla_get_be64(nla[NFTA_SET_ELEM_TIMEOUT])); + timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64( + nla[NFTA_SET_ELEM_TIMEOUT]))); } else if (set->flags & NFT_SET_TIMEOUT) { timeout = set->timeout; } @@ -3565,6 +3572,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, dreg = nft_type_to_reg(set->dtype); list_for_each_entry(binding, &set->bindings, list) { struct nft_ctx bind_ctx = { + .net = ctx->net, .afi = ctx->afi, .table = ctx->table, .chain = (struct nft_chain *)binding->chain, @@ -3812,7 +3820,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu) gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); for (i = 0; i < gcb->head.cnt; i++) - nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); + nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); kfree(gcb); } EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); @@ -4030,7 +4038,7 @@ static void nf_tables_commit_release(struct nft_trans *trans) break; case NFT_MSG_DELSETELEM: nft_set_elem_destroy(nft_trans_elem_set(trans), - nft_trans_elem(trans).priv); + nft_trans_elem(trans).priv, true); break; } kfree(trans); @@ -4171,7 +4179,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) break; case NFT_MSG_NEWSETELEM: nft_set_elem_destroy(nft_trans_elem_set(trans), - nft_trans_elem(trans).priv); + nft_trans_elem(trans).priv, true); break; } kfree(trans); @@ -4421,7 +4429,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, * Otherwise a 0 is returned and the attribute value is stored in the * destination variable. 
*/ -unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) +int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) { u32 val; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 517f08767a3c..31ca94793aa9 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, ®s->data[priv->sreg_key], ®s->data[priv->sreg_data], timeout, GFP_ATOMIC); - if (elem == NULL) { - if (set->size) - atomic_dec(&set->nelems); - return NULL; - } + if (elem == NULL) + goto err1; ext = nft_set_elem_ext(set, elem); if (priv->expr != NULL && nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) - return NULL; + goto err2; return elem; + +err2: + nft_set_elem_destroy(set, elem, false); +err1: + if (set->size) + atomic_dec(&set->nelems); + return NULL; } static void nft_dynset_eval(const struct nft_expr *expr, @@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, return PTR_ERR(set); } + if (set->ops->update == NULL) + return -EOPNOTSUPP; + if (set->flags & NFT_SET_CONSTANT) return -EBUSY; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index baf694de3935..d5447a22275c 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -53,6 +53,7 @@ static int nft_hash_init(const struct nft_ctx *ctx, { struct nft_hash *priv = nft_expr_priv(expr); u32 len; + int err; if (!tb[NFTA_HASH_SREG] || !tb[NFTA_HASH_DREG] || @@ -67,8 +68,10 @@ static int nft_hash_init(const struct nft_ctx *ctx, priv->sreg = nft_parse_register(tb[NFTA_HASH_SREG]); priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); - len = ntohl(nla_get_be32(tb[NFTA_HASH_LEN])); - if (len == 0 || len > U8_MAX) + err = nft_parse_u32_check(tb[NFTA_HASH_LEN], U8_MAX, &len); + if (err < 0) + return err; + if (len == 0) return -ERANGE; priv->len = len; diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index fbc88009ca2e..8f0aaaea1376 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -59,6 +59,12 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr int err; u32 op; + if (!tb[NFTA_RANGE_SREG] || + !tb[NFTA_RANGE_OP] || + !tb[NFTA_RANGE_FROM_DATA] || + !tb[NFTA_RANGE_TO_DATA]) + return -EINVAL; + err = nft_data_init(NULL, &priv->data_from, sizeof(priv->data_from), &desc_from, tb[NFTA_RANGE_FROM_DATA]); if (err < 0) diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 3794cb2fc788..a3dface3e6e6 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, const struct nft_set_ext **ext) { struct nft_hash *priv = nft_set_priv(set); - struct nft_hash_elem *he; + struct nft_hash_elem *he, *prev; struct nft_hash_cmp_arg arg = { .genmask = NFT_GENMASK_ANY, .set = set, @@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, he = new(set, expr, regs); if (he == NULL) goto err1; - if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, - nft_hash_params)) + + prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, + nft_hash_params); + if (IS_ERR(prev)) goto err2; + + /* Another cpu may race to insert the element with the same key */ + if (prev) { + nft_set_elem_destroy(set, he, true); + he = prev; + } + out: *ext = &he->ext; return true; err2: - nft_set_elem_destroy(set, he); + nft_set_elem_destroy(set, he, true); err1: 
return false; } @@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set, static void nft_hash_elem_destroy(void *ptr, void *arg) { - nft_set_elem_destroy((const struct nft_set *)arg, ptr); + nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); } static void nft_hash_destroy(const struct nft_set *set) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 38b5bda242f8..36493a7cae88 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) while ((node = priv->root.rb_node) != NULL) { rb_erase(node, &priv->root); rbe = rb_entry(node, struct nft_rbtree_elem, node); - nft_set_elem_destroy(set, rbe); + nft_set_elem_destroy(set, rbe, true); } } diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 69f78e96fdb4..b83e158e116a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) u_int32_t newmark; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL) + if (ct == NULL || nf_ct_is_untracked(ct)) return XT_CONTINUE; switch (info->mode) { @@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) const struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); - if (ct == NULL) + if (ct == NULL || nf_ct_is_untracked(ct)) return false; return ((ct->mark & info->mask) == info->mark) ^ info->invert; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 62bea4591054..602e5ebe9db3 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -322,14 +322,11 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); } -static void netlink_sock_destruct(struct sock *sk) +static void __netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->cb_running) { - if (nlk->cb.done) - nlk->cb.done(&nlk->cb); - module_put(nlk->cb.module); kfree_skb(nlk->cb.skb); } @@ -346,6 +343,28 @@ static void netlink_sock_destruct(struct sock *sk) WARN_ON(nlk_sk(sk)->groups); } +static void netlink_sock_destruct_work(struct work_struct *work) +{ + struct netlink_sock *nlk = container_of(work, struct netlink_sock, + work); + + nlk->cb.done(&nlk->cb); + __netlink_sock_destruct(&nlk->sk); +} + +static void netlink_sock_destruct(struct sock *sk) +{ + struct netlink_sock *nlk = nlk_sk(sk); + + if (nlk->cb_running && nlk->cb.done) { + INIT_WORK(&nlk->work, netlink_sock_destruct_work); + schedule_work(&nlk->work); + return; + } + + __netlink_sock_destruct(sk); +} + /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. 
Exclusive sleep solves diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index 3cfd6cc60504..4fdb38318977 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -3,6 +3,7 @@ #include #include +#include #include #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) @@ -33,6 +34,7 @@ struct netlink_sock { struct rhash_head node; struct rcu_head rcu; + struct work_struct work; }; static inline struct netlink_sock *nlk_sk(struct sock *sk) diff --git a/net/netlink/diag.c b/net/netlink/diag.c index b2f0e986a6f4..a5546249fb10 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) } cb->args[1] = i; } else { - if (req->sdiag_protocol >= MAX_LINKS) { - read_unlock(&nl_table_lock); - rcu_read_unlock(); + if (req->sdiag_protocol >= MAX_LINKS) return -ENOENT; - } err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 23cc12639ba7..49c28e8ef01b 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family) err = genl_validate_assign_mc_groups(family); if (err) - goto errout_locked; + goto errout_free; list_add_tail(&family->family_list, genl_family_chain(family->id)); genl_unlock_all(); @@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family) return 0; +errout_free: + kfree(family->attrbuf); errout_locked: genl_unlock_all(); errout: diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 31045ef44a82..fecefa2dc94e 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -370,8 +370,11 @@ static int handle_fragments(struct net *net, struct sw_flow_key *key, skb_orphan(skb); memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); err = nf_ct_frag6_gather(net, skb, user); - if (err) + if (err) { + if (err != -EINPROGRESS) + kfree_skb(skb); return err; + } key->ip.proto = ipv6_hdr(skb)->nexthdr; ovs_cb.mru = IP6CB(skb)->frag_max_size; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d2238b204691..dd2332390c45 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: case TPACKET_V2: case TPACKET_V3: - po->tp_version = val; - return 0; + break; default: return -EINVAL; } + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_version = val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_RESERVE: { @@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; + lock_sock(sk); /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { net_warn_ratelimited("Tx-ring is not supported.\n"); @@ -4245,7 +4252,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, goto out; } - lock_sock(sk); /* Detach socket from network */ spin_lock(&po->bind_lock); @@ -4294,11 +4300,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, if (!tx_ring) 
prb_shutdown_retire_blk_timer(po, rb_queue); } - release_sock(sk); if (pg_vec) free_pg_vec(pg_vec, order, req->tp_block_nr); out: + release_sock(sk); return err; } diff --git a/net/rds/tcp.c b/net/rds/tcp.c index fcddacc92e01..20e2923dc827 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -659,6 +659,8 @@ out_recv: out_pernet: unregister_pernet_subsys(&rds_tcp_net_ops); out_slab: + if (unregister_netdevice_notifier(&rds_tcp_dev_notifier)) + pr_warn("could not unregister rds_tcp_dev_notifier\n"); kmem_cache_destroy(rds_tcp_conn_slab); out: return ret; diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index b54d56d4959b..cf9b2fe8eac6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -108,6 +108,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind) kfree(keys); } +static bool offset_valid(struct sk_buff *skb, int offset) +{ + if (offset > 0 && offset > skb->len) + return false; + + if (offset < 0 && -offset > skb_headroom(skb)) + return false; + + return true; +} + static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { @@ -134,6 +145,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, if (tkey->offmask) { char *d, _d; + if (!offset_valid(skb, off + tkey->at)) { + pr_info("tc filter pedit 'at' offset %d out of bounds\n", + off + tkey->at); + goto bad; + } d = skb_header_pointer(skb, off + tkey->at, 1, &_d); if (!d) @@ -146,10 +162,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a, " offset must be on 32 bit boundaries\n"); goto bad; } - if (offset > 0 && offset > skb->len) { - pr_info("tc filter pedit" - " offset %d can't exceed pkt length %d\n", - offset, skb->len); + + if (!offset_valid(skb, off + offset)) { + pr_info("tc filter pedit offset %d out of bounds\n", + offset); goto bad; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2b2a7974e4bb..b05d4a2155b0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, if (!skb) return -ENOBUFS; - if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, + n->nlmsg_flags, event) <= 0) { kfree_skb(skb); return -EINVAL; } diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index eb219b78cd49..5877f6061b57 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle) struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; - if (head == NULL) - return 0UL; - list_for_each_entry(f, &head->flist, link) { if (f->handle == handle) { l = (unsigned long) f; @@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force) tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, basic_delete_filter); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index bb1d5a487081..0a47ba5e6109 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -292,7 +292,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force) call_rcu(&prog->rcu, __cls_bpf_delete_prog); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } @@ -303,9 +302,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) struct cls_bpf_prog *prog; unsigned long ret = 0UL; - if (head == NULL) - return 0UL; - list_for_each_entry(prog, &head->plist, link) { if 
(prog->handle == handle) { ret = (unsigned long) prog; diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 85233c470035..c1f20077837f 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -137,11 +137,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force) if (!force) return false; - - if (head) { - RCU_INIT_POINTER(tp->root, NULL); + /* Head can still be NULL due to cls_cgroup_init(). */ + if (head) call_rcu(&head->rcu, cls_cgroup_destroy_rcu); - } + return true; } diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index e39672394c7b..6575aba87630 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -596,7 +596,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force) list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); } - RCU_INIT_POINTER(tp->root, NULL); kfree_rcu(head, rcu); return true; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f6f40fba599b..904442421db3 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -64,7 +65,10 @@ struct cls_fl_head { bool mask_assigned; struct list_head filters; struct rhashtable_params ht_params; - struct rcu_head rcu; + union { + struct work_struct work; + struct rcu_head rcu; + }; }; struct cls_fl_filter { @@ -269,6 +273,24 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol, &tc); } +static void fl_destroy_sleepable(struct work_struct *work) +{ + struct cls_fl_head *head = container_of(work, struct cls_fl_head, + work); + if (head->mask_assigned) + rhashtable_destroy(&head->ht); + kfree(head); + module_put(THIS_MODULE); +} + +static void fl_destroy_rcu(struct rcu_head *rcu) +{ + struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu); + + INIT_WORK(&head->work, fl_destroy_sleepable); + schedule_work(&head->work); +} + static bool fl_destroy(struct tcf_proto *tp, bool force) { struct cls_fl_head *head = rtnl_dereference(tp->root); @@ -282,10 +304,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force) list_del_rcu(&f->list); call_rcu(&f->rcu, fl_destroy_filter); } - RCU_INIT_POINTER(tp->root, NULL); - if (head->mask_assigned) - rhashtable_destroy(&head->ht); - kfree_rcu(head, rcu); + + __module_get(THIS_MODULE); + call_rcu(&head->rcu, fl_destroy_rcu); return true; } @@ -711,8 +732,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, goto errout; if (fold) { - rhashtable_remove_fast(&head->ht, &fold->ht_node, - head->ht_params); + if (!tc_skip_sw(fold->flags)) + rhashtable_remove_fast(&head->ht, &fold->ht_node, + head->ht_params); fl_hw_destroy_filter(tp, (unsigned long)fold); } @@ -739,8 +761,9 @@ static int fl_delete(struct tcf_proto *tp, unsigned long arg) struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f = (struct cls_fl_filter *) arg; - rhashtable_remove_fast(&head->ht, &f->ht_node, - head->ht_params); + if (!tc_skip_sw(f->flags)) + rhashtable_remove_fast(&head->ht, &f->ht_node, + head->ht_params); list_del_rcu(&f->list); fl_hw_destroy_filter(tp, (unsigned long)f); tcf_unbind_filter(tp, &f->res); diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 25927b6c4436..f935429bd5ef 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -114,7 +114,6 @@ static bool mall_destroy(struct tcf_proto *tp, bool force) call_rcu(&f->rcu, mall_destroy_filter); } - RCU_INIT_POINTER(tp->root, 
NULL); kfree_rcu(head, rcu); return true; } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 4f05a19fb073..322438fb3ffc 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; nhptr = ip_hdr(skb); #endif - + if (unlikely(!head)) + return -1; restart: #if RSVP_DST_LEN == 4 diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 96144bdf30db..0751245a6ace 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -543,7 +543,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force) walker.fn = tcindex_destroy_element; tcindex_walk(tp, &walker); - RCU_INIT_POINTER(tp->root, NULL); call_rcu(&p->rcu, __tcindex_destroy); return true; } diff --git a/net/sctp/input.c b/net/sctp/input.c index a2ea1d1cc06a..a01a56ec8b8c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb) * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB */ if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { - if (asoc) { - sctp_association_put(asoc); + if (transport) { + sctp_transport_put(transport); asoc = NULL; + transport = NULL; } else { sctp_endpoint_put(ep); ep = NULL; @@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb) bh_unlock_sock(sk); /* Release the asoc/ep ref we took in the lookup calls. */ - if (asoc) - sctp_association_put(asoc); + if (transport) + sctp_transport_put(transport); else sctp_endpoint_put(ep); @@ -283,8 +284,8 @@ discard_it: discard_release: /* Release the asoc/ep ref we took in the lookup calls. */ - if (asoc) - sctp_association_put(asoc); + if (transport) + sctp_transport_put(transport); else sctp_endpoint_put(ep); @@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; struct sctp_inq *inqueue = &chunk->rcvr->inqueue; + struct sctp_transport *t = chunk->transport; struct sctp_ep_common *rcvr = NULL; int backloged = 0; @@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) done: /* Release the refs we took in sctp_add_backlog */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) - sctp_association_put(sctp_assoc(rcvr)); + sctp_transport_put(t); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_put(sctp_ep(rcvr)); else @@ -363,6 +365,7 @@ done: static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) { struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; + struct sctp_transport *t = chunk->transport; struct sctp_ep_common *rcvr = chunk->rcvr; int ret; @@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) * from us */ if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) - sctp_association_hold(sctp_assoc(rcvr)); + sctp_transport_hold(t); else if (SCTP_EP_TYPE_SOCKET == rcvr->type) sctp_endpoint_hold(sctp_ep(rcvr)); else @@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, return sk; out: - sctp_association_put(asoc); + sctp_transport_put(transport); return NULL; } /* Common cleanup code for icmp/icmpv6 error handler. 
*/ -void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) +void sctp_err_finish(struct sock *sk, struct sctp_transport *t) { bh_unlock_sock(sk); - sctp_association_put(asoc); + sctp_transport_put(t); } /* @@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) } out_unlock: - sctp_err_finish(sk, asoc); + sctp_err_finish(sk, transport); } /* @@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association( goto out; asoc = t->asoc; - sctp_association_hold(asoc); *pt = t; - sctp_transport_put(t); - out: return asoc; } @@ -986,7 +986,7 @@ int sctp_has_association(struct net *net, struct sctp_transport *transport; if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { - sctp_association_put(asoc); + sctp_transport_put(transport); return 1; } @@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, struct sctphdr *sh = sctp_hdr(skb); union sctp_params params; sctp_init_chunk_t *init; - struct sctp_transport *transport; struct sctp_af *af; /* @@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, af->from_addr_param(paddr, params.addr, sh->source, 0); - asoc = __sctp_lookup_association(net, laddr, paddr, &transport); + asoc = __sctp_lookup_association(net, laddr, paddr, transportp); if (asoc) return asoc; } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f473779e8b1c..176af3080a2b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } out_unlock: - sctp_err_finish(sk, asoc); + sctp_err_finish(sk, transport); out: if (likely(idev != NULL)) in6_dev_put(idev); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9fbb6feb8c27..f23ad913dc7a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk, timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); - err = sctp_wait_for_connect(asoc, &timeo); - if ((err == 0 || err == -EINPROGRESS) && assoc_id) + if (assoc_id) *assoc_id = asoc->assoc_id; + err = sctp_wait_for_connect(asoc, &timeo); + /* Note: the asoc may be freed after the return of + * sctp_wait_for_connect. + */ /* Don't free association on exit. 
*/ asoc = NULL; @@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how) { struct net *net = sock_net(sk); struct sctp_endpoint *ep; - struct sctp_association *asoc; if (!sctp_style(sk, TCP)) return; - if (how & SEND_SHUTDOWN) { + ep = sctp_sk(sk)->ep; + if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { + struct sctp_association *asoc; + sk->sk_state = SCTP_SS_CLOSING; - ep = sctp_sk(sk)->ep; - if (!list_empty(&ep->asocs)) { - asoc = list_entry(ep->asocs.next, - struct sctp_association, asocs); - sctp_primitive_SHUTDOWN(net, asoc, NULL); - } + asoc = list_entry(ep->asocs.next, + struct sctp_association, asocs); + sctp_primitive_SHUTDOWN(net, asoc, NULL); } } @@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), if (!transport || !sctp_transport_hold(transport)) goto out; - sctp_association_hold(transport->asoc); - sctp_transport_put(transport); - rcu_read_unlock(); err = cb(transport, p); - sctp_association_put(transport->asoc); + sctp_transport_put(transport); out: return err; diff --git a/net/socket.c b/net/socket.c index 5a9bf5ee2464..73dc69f9681e 100644 --- a/net/socket.c +++ b/net/socket.c @@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = { .get = sockfs_xattr_get, }; +static int sockfs_security_xattr_set(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *suffix, const void *value, + size_t size, int flags) +{ + /* Handled by LSM. */ + return -EAGAIN; +} + +static const struct xattr_handler sockfs_security_xattr_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .set = sockfs_security_xattr_set, +}; + static const struct xattr_handler *sockfs_xattr_handlers[] = { &sockfs_xattr_handler, + &sockfs_security_xattr_handler, NULL }; @@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, if (err) break; ++datagrams; + if (msg_data_left(&msg_sys)) + break; cond_resched(); } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c3f652395a80..3bc1d61694cb 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure) void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) { struct svc_xprt *xprt; - struct svc_sock *svsk; - struct socket *sock; struct list_head *le, *next; LIST_HEAD(to_be_closed); - struct linger no_linger = { - .l_onoff = 1, - .l_linger = 0, - }; spin_lock_bh(&serv->sv_lock); list_for_each_safe(le, next, &serv->sv_tempsocks) { @@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) list_del_init(le); xprt = list_entry(le, struct svc_xprt, xpt_list); dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); - svsk = container_of(xprt, struct svc_sock, sk_xprt); - sock = svsk->sk_sock; - kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&no_linger, sizeof(no_linger)); + xprt->xpt_ops->xpo_kill_temp_xprt(xprt); svc_close_xprt(xprt); } } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 57625f64efd5..a4bc98265d88 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt) return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); } +static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt) +{ + struct svc_sock *svsk; + struct socket *sock; + struct linger no_linger = { + .l_onoff = 1, + .l_linger = 0, + }; + + svsk = container_of(xprt, struct svc_sock, 
sk_xprt); + sock = svsk->sk_sock; + kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, + (char *)&no_linger, sizeof(no_linger)); +} + /* * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo */ @@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt) return NULL; } +static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt) +{ +} + static struct svc_xprt *svc_udp_create(struct svc_serv *serv, struct net *net, struct sockaddr *sa, int salen, @@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = { .xpo_has_wspace = svc_udp_has_wspace, .xpo_accept = svc_udp_accept, .xpo_secure_port = svc_sock_secure_port, + .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt, }; static struct svc_xprt_class svc_udp_class = { @@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = { .xpo_has_wspace = svc_tcp_has_wspace, .xpo_accept = svc_tcp_accept, .xpo_secure_port = svc_sock_secure_port, + .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt, }; static struct svc_xprt_class svc_tcp_class = { diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 6864fb967038..1334de2715c2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct svc_xprt *xprt); static int svc_rdma_has_wspace(struct svc_xprt *xprt); static int svc_rdma_secure_port(struct svc_rqst *); +static void svc_rdma_kill_temp_xprt(struct svc_xprt *); static struct svc_xprt_ops svc_rdma_ops = { .xpo_create = svc_rdma_create, @@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = { .xpo_has_wspace = svc_rdma_has_wspace, .xpo_accept = svc_rdma_accept, .xpo_secure_port = svc_rdma_secure_port, + .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt, }; struct svc_xprt_class svc_rdma_class = { @@ -1317,6 +1319,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp) return 1; } +static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt) +{ +} + int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) { struct ib_send_wr *bad_wr, *n_wr; diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 975dbeb60ab0..52d74760fb68 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -421,6 +421,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b, dev = dev_get_by_name(net, driver_name); if (!dev) return -ENODEV; + if (tipc_mtu_bad(dev, 0)) { + dev_put(dev); + return -EINVAL; + } /* Associate TIPC bearer with L2 bearer */ rcu_assign_pointer(b->media_ptr, dev); @@ -610,8 +614,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, if (!b) return NOTIFY_DONE; - b->mtu = dev->mtu; - switch (evt) { case NETDEV_CHANGE: if (netif_carrier_ok(dev)) @@ -624,6 +626,11 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, tipc_reset_bearer(net, b); break; case NETDEV_CHANGEMTU: + if (tipc_mtu_bad(dev, 0)) { + bearer_disable(net, b); + break; + } + b->mtu = dev->mtu; tipc_reset_bearer(net, b); break; case NETDEV_CHANGEADDR: diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 78892e2f53e3..278ff7f616f9 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -39,6 +39,7 @@ #include "netlink.h" #include "core.h" +#include "msg.h" #include #define MAX_MEDIA 3 @@ -59,6 +60,9 @@ #define TIPC_MEDIA_TYPE_IB 2 #define TIPC_MEDIA_TYPE_UDP 3 +/* minimum bearer MTU */ +#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE) + /** * struct tipc_media_addr - destination address used by TIPC 
bearers * @value: address info (format defined by media) @@ -215,4 +219,13 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, struct sk_buff_head *xmitq); +/* check if device MTU is too low for tipc headers */ +static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve) +{ + if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve) + return false; + netdev_warn(dev, "MTU too low for tipc bearer\n"); + return true; +} + #endif /* _TIPC_BEARER_H */ diff --git a/net/tipc/link.c b/net/tipc/link.c index 1055164c6232..bda89bf9f4ff 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -47,8 +47,8 @@ #include struct tipc_stats { - u32 sent_info; /* used in counting # sent packets */ - u32 recv_info; /* used in counting # recv'd packets */ + u32 sent_pkts; + u32 recv_pkts; u32 sent_states; u32 recv_states; u32 sent_probes; @@ -857,7 +857,6 @@ void tipc_link_reset(struct tipc_link *l) l->acked = 0; l->silent_intv_cnt = 0; l->rst_cnt = 0; - l->stats.recv_info = 0; l->stale_count = 0; l->bc_peer_is_up = false; memset(&l->mon_state, 0, sizeof(l->mon_state)); @@ -888,6 +887,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, struct sk_buff_head *transmq = &l->transmq; struct sk_buff_head *backlogq = &l->backlogq; struct sk_buff *skb, *_skb, *bskb; + int pkt_cnt = skb_queue_len(list); /* Match msg importance against this and all higher backlog limits: */ if (!skb_queue_empty(backlogq)) { @@ -901,6 +901,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, return -EMSGSIZE; } + if (pkt_cnt > 1) { + l->stats.sent_fragmented++; + l->stats.sent_fragments += pkt_cnt; + } + /* Prepare each packet for sending, and add to relevant queue: */ while (skb_queue_len(list)) { skb = skb_peek(list); @@ -920,6 +925,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, __skb_queue_tail(xmitq, _skb); TIPC_SKB_CB(skb)->ackers = l->ackers; l->rcv_unacked = 0; + l->stats.sent_pkts++; seqno++; continue; } @@ -968,6 +974,7 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq) msg_set_ack(hdr, ack); msg_set_bcast_ack(hdr, bc_ack); l->rcv_unacked = 0; + l->stats.sent_pkts++; seqno++; } l->snd_nxt = seqno; @@ -1260,7 +1267,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, /* Deliver packet */ l->rcv_nxt++; - l->stats.recv_info++; + l->stats.recv_pkts++; if (!tipc_data_input(l, skb, l->inputq)) rc |= tipc_link_input(l, skb, l->inputq); if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) @@ -1492,8 +1499,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) l->tolerance = peers_tol; - if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI, - TIPC_MAX_LINK_PRI)) { + /* Update own prio if peer indicates a different value */ + if ((peers_prio != l->priority) && + in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { l->priority = peers_prio; rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } @@ -1799,10 +1807,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) void tipc_link_reset_stats(struct tipc_link *l) { memset(&l->stats, 0, sizeof(l->stats)); - if (!link_is_bc_sndlink(l)) { - l->stats.sent_info = l->snd_nxt; - l->stats.recv_info = l->rcv_nxt; - } } static void link_print(struct tipc_link *l, const char *str) @@ -1866,12 +1870,12 @@ static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) }; struct nla_map map[] = { - {TIPC_NLA_STATS_RX_INFO, 
s->recv_info}, + {TIPC_NLA_STATS_RX_INFO, 0}, {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, - {TIPC_NLA_STATS_TX_INFO, s->sent_info}, + {TIPC_NLA_STATS_TX_INFO, 0}, {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, @@ -1946,9 +1950,9 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, goto attr_msg_full; if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) goto attr_msg_full; - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt)) + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts)) goto attr_msg_full; - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt)) + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts)) goto attr_msg_full; if (tipc_link_is_up(link)) @@ -2003,12 +2007,12 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, }; struct nla_map map[] = { - {TIPC_NLA_STATS_RX_INFO, stats->recv_info}, + {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts}, {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, - {TIPC_NLA_STATS_TX_INFO, stats->sent_info}, + {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts}, {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, @@ -2075,9 +2079,9 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) goto attr_msg_full; if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) goto attr_msg_full; - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt)) + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0)) goto attr_msg_full; - if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt)) + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0)) goto attr_msg_full; prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index ed97a5876ebe..9e109bb1a207 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -455,14 +455,14 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, int i, applied_bef; state->probing = false; - if (!dlen) - return; /* Sanity check received domain record */ - if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) { - pr_warn_ratelimited("Received illegal domain record\n"); + if (dlen < dom_rec_len(arrv_dom, 0)) + return; + if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) + return; + if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) return; - } /* Synch generation numbers with peer if link just came up */ if (!state->synched) { diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f9f5f3c3dab5..41f013888f07 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1,7 +1,7 @@ /* * net/tipc/socket.c: TIPC socket API * - * Copyright (c) 2001-2007, 2012-2015, Ericsson AB + * Copyright (c) 2001-2007, 2012-2016, Ericsson AB * Copyright (c) 2004-2008, 2010-2013, Wind River Systems * All rights reserved. 
* @@ -129,54 +129,8 @@ static const struct proto_ops packet_ops; static const struct proto_ops stream_ops; static const struct proto_ops msg_ops; static struct proto tipc_proto; - static const struct rhashtable_params tsk_rht_params; -/* - * Revised TIPC socket locking policy: - * - * Most socket operations take the standard socket lock when they start - * and hold it until they finish (or until they need to sleep). Acquiring - * this lock grants the owner exclusive access to the fields of the socket - * data structures, with the exception of the backlog queue. A few socket - * operations can be done without taking the socket lock because they only - * read socket information that never changes during the life of the socket. - * - * Socket operations may acquire the lock for the associated TIPC port if they - * need to perform an operation on the port. If any routine needs to acquire - * both the socket lock and the port lock it must take the socket lock first - * to avoid the risk of deadlock. - * - * The dispatcher handling incoming messages cannot grab the socket lock in - * the standard fashion, since invoked it runs at the BH level and cannot block. - * Instead, it checks to see if the socket lock is currently owned by someone, - * and either handles the message itself or adds it to the socket's backlog - * queue; in the latter case the queued message is processed once the process - * owning the socket lock releases it. - * - * NOTE: Releasing the socket lock while an operation is sleeping overcomes - * the problem of a blocked socket operation preventing any other operations - * from occurring. However, applications must be careful if they have - * multiple threads trying to send (or receive) on the same socket, as these - * operations might interfere with each other. For example, doing a connect - * and a receive at the same time might allow the receive to consume the - * ACK message meant for the connect. While additional work could be done - * to try and overcome this, it doesn't seem to be worthwhile at the present. - * - * NOTE: Releasing the socket lock while an operation is sleeping also ensures - * that another operation that must be performed in a non-blocking manner is - * not delayed for very long because the lock has already been taken. - * - * NOTE: This code assumes that certain fields of a port/socket pair are - * constant over its lifetime; such fields can be examined without taking - * the socket lock and/or port lock, and do not need to be re-read even - * after resuming processing after waiting. 
These fields include: - * - socket type - * - pointer to socket sk structure (aka tipc_sock structure) - * - pointer to port structure - * - port reference - */ - static u32 tsk_own_node(struct tipc_sock *tsk) { return msg_prevnode(&tsk->phdr); @@ -232,7 +186,7 @@ static struct tipc_sock *tipc_sk(const struct sock *sk) static bool tsk_conn_cong(struct tipc_sock *tsk) { - return tsk->snt_unacked >= tsk->snd_win; + return tsk->snt_unacked > tsk->snd_win; } /* tsk_blocks(): translate a buffer size in bytes to number of diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 78cab9c5a445..b58dc95f3d35 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -697,6 +697,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, udp_conf.local_ip.s_addr = htonl(INADDR_ANY); udp_conf.use_udp_checksums = false; ub->ifindex = dev->ifindex; + if (tipc_mtu_bad(dev, sizeof(struct iphdr) + + sizeof(struct udphdr))) { + err = -EINVAL; + goto err; + } b->mtu = dev->mtu - sizeof(struct iphdr) - sizeof(struct udphdr); #if IS_ENABLED(CONFIG_IPV6) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 145082e2ba36..2358f2690ec5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2199,7 +2199,8 @@ out: * Sleep until more data has arrived. But check for races.. */ static long unix_stream_data_wait(struct sock *sk, long timeo, - struct sk_buff *last, unsigned int last_len) + struct sk_buff *last, unsigned int last_len, + bool freezable) { struct sk_buff *tail; DEFINE_WAIT(wait); @@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); unix_state_unlock(sk); - timeo = freezable_schedule_timeout(timeo); + if (freezable) + timeo = freezable_schedule_timeout(timeo); + else + timeo = schedule_timeout(timeo); unix_state_lock(sk); if (sock_flag(sk, SOCK_DEAD)) @@ -2250,7 +2254,8 @@ struct unix_stream_read_state { unsigned int splice_flags; }; -static int unix_stream_read_generic(struct unix_stream_read_state *state) +static int unix_stream_read_generic(struct unix_stream_read_state *state, + bool freezable) { struct scm_cookie scm; struct socket *sock = state->socket; @@ -2330,7 +2335,7 @@ again: mutex_unlock(&u->iolock); timeo = unix_stream_data_wait(sk, timeo, last, - last_len); + last_len, freezable); if (signal_pending(current)) { err = sock_intr_errno(timeo); @@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, .flags = flags }; - return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, true); } static int unix_stream_splice_actor(struct sk_buff *skb, @@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, flags & SPLICE_F_NONBLOCK) state.flags = MSG_DONTWAIT; - return unix_stream_read_generic(&state); + return unix_stream_read_generic(&state, false); } static int unix_shutdown(struct socket *sock, int mode) @@ -2812,7 +2817,8 @@ static int unix_seq_show(struct seq_file *seq, void *v) i++; } for ( ; i < len; i++) - seq_putc(seq, u->addr->name->sun_path[i]); + seq_putc(seq, u->addr->name->sun_path[i] ?: + '@'); } unix_state_unlock(s); seq_putc(seq, '\n'); diff --git a/net/wireless/core.h b/net/wireless/core.h index 08d2e948c9ad..f0c0c8a48c92 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -71,6 +71,7 @@ struct cfg80211_registered_device { struct list_head bss_list; struct rb_root bss_tree; u32 bss_generation; + u32 bss_entries; struct cfg80211_scan_request *scan_req; /* protected by RTNL 
*/ struct sk_buff *scan_msg; struct cfg80211_sched_scan_request __rcu *sched_scan_req; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index b5bd58d0f731..35ad69fd0838 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -57,6 +57,19 @@ * also linked into the probe response struct. */ +/* + * Limit the number of BSS entries stored in mac80211. Each one is + * a bit over 4k at most, so this limits to roughly 4-5M of memory. + * If somebody wants to really attack this though, they'd likely + * use small beacons, and only one type of frame, limiting each of + * the entries to a much smaller size (in order to generate more + * entries in total, so overhead is bigger.) + */ +static int bss_entries_limit = 1000; +module_param(bss_entries_limit, int, 0644); +MODULE_PARM_DESC(bss_entries_limit, + "limit to number of scan BSS entries (per wiphy, default 1000)"); + #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) static void bss_free(struct cfg80211_internal_bss *bss) @@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, list_del_init(&bss->list); rb_erase(&bss->rbn, &rdev->bss_tree); + rdev->bss_entries--; + WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), + "rdev bss entries[%d]/list[empty:%d] corruption\n", + rdev->bss_entries, list_empty(&rdev->bss_list)); bss_ref_put(rdev, bss); return true; } @@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, rdev->bss_generation++; } +static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) +{ + struct cfg80211_internal_bss *bss, *oldest = NULL; + bool ret; + + lockdep_assert_held(&rdev->bss_lock); + + list_for_each_entry(bss, &rdev->bss_list, list) { + if (atomic_read(&bss->hold)) + continue; + + if (!list_empty(&bss->hidden_list) && + !bss->pub.hidden_beacon_bss) + continue; + + if (oldest && time_before(oldest->ts, bss->ts)) + continue; + oldest = bss; + } + + if (WARN_ON(!oldest)) + return false; + + /* + * The callers make sure to increase rdev->bss_generation if anything + * gets removed (and a new entry added), so there's no need to also do + * it here. + */ + + ret = __cfg80211_unlink_bss(rdev, oldest); + WARN_ON(!ret); + return ret; +} + void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool send_message) { @@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, const u8 *ie; int i, ssidlen; u8 fold = 0; + u32 n_entries = 0; ies = rcu_access_pointer(new->pub.beacon_ies); if (WARN_ON(!ies)) @@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, /* This is the bad part ... 
*/ list_for_each_entry(bss, &rdev->bss_list, list) { + /* + * we're iterating all the entries anyway, so take the + * opportunity to validate the list length accounting + */ + n_entries++; + if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) continue; if (bss->pub.channel != new->pub.channel) @@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, new->pub.beacon_ies); } + WARN_ONCE(n_entries != rdev->bss_entries, + "rdev bss entries[%d]/list[len:%d] corruption\n", + rdev->bss_entries, n_entries); + return true; } @@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, } } + if (rdev->bss_entries >= bss_entries_limit && + !cfg80211_bss_expire_oldest(rdev)) { + kfree(new); + goto drop; + } + list_add_tail(&new->list, &rdev->bss_list); + rdev->bss_entries++; rb_insert_bss(rdev, new); found = new; } diff --git a/net/wireless/util.c b/net/wireless/util.c index 5ea12afc7706..659b507b347d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) 58500000, 65000000, 78000000, - 0, + /* not in the spec, but some devices use this: */ + 86500000, }, { 13500000, 27000000, diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index fd6986634e6f..5bf7e1bfeac7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1268,12 +1268,14 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, err = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, policy_to_flow_dir(dir)); - if (!err && !xfrm_pol_hold_rcu(pol)) - goto again; - else if (err == -ESRCH) + if (!err) { + if (!xfrm_pol_hold_rcu(pol)) + goto again; + } else if (err == -ESRCH) { pol = NULL; - else + } else { pol = ERR_PTR(err); + } } else pol = NULL; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 08892091cfe3..671a1d0333f0 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2450,7 +2450,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) #ifdef CONFIG_COMPAT if (in_compat_syscall()) - return -ENOTSUPP; + return -EOPNOTSUPP; #endif type = nlh->nlmsg_type; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 12b7304d55dc..72c58675973e 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -27,6 +27,7 @@ hostprogs-y += xdp2 hostprogs-y += test_current_task_under_cgroup hostprogs-y += trace_event hostprogs-y += sampleip +hostprogs-y += tc_l2_redirect test_verifier-objs := test_verifier.o libbpf.o test_maps-objs := test_maps.o libbpf.o @@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \ test_current_task_under_cgroup_user.o trace_event-objs := bpf_load.o libbpf.o trace_event_user.o sampleip-objs := bpf_load.o libbpf.o sampleip_user.o +tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o always += trace_output_kern.o always += tcbpf1_kern.o always += tcbpf2_kern.o +always += tc_l2_redirect_kern.o always += lathist_kern.o always += offwaketime_kern.o always += spintest_kern.o @@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf HOSTLOADLIBES_test_current_task_under_cgroup += -lelf HOSTLOADLIBES_trace_event += -lelf HOSTLOADLIBES_sampleip += -lelf +HOSTLOADLIBES_tc_l2_redirect += -l elf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ 
LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index 90f44bd2045e..dadd5161bd91 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -113,7 +113,7 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ #define PT_REGS_RC(x) ((x)->gprs[2]) #define PT_REGS_SP(x) ((x)->gprs[15]) -#define PT_REGS_IP(x) ((x)->ip) +#define PT_REGS_IP(x) ((x)->psw.addr) #elif defined(__aarch64__) diff --git a/samples/bpf/sampleip_kern.c b/samples/bpf/sampleip_kern.c index 774a681f374a..ceabf31079cf 100644 --- a/samples/bpf/sampleip_kern.c +++ b/samples/bpf/sampleip_kern.c @@ -25,7 +25,7 @@ int do_sample(struct bpf_perf_event_data *ctx) u64 ip; u32 *value, init_val = 1; - ip = ctx->regs.ip; + ip = PT_REGS_IP(&ctx->regs); value = bpf_map_lookup_elem(&ip_map, &ip); if (value) *value += 1; diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh new file mode 100755 index 000000000000..80a05591a140 --- /dev/null +++ b/samples/bpf/tc_l2_redirect.sh @@ -0,0 +1,173 @@ +#!/bin/bash + +[[ -z $TC ]] && TC='tc' +[[ -z $IP ]] && IP='ip' + +REDIRECT_USER='./tc_l2_redirect' +REDIRECT_BPF='./tc_l2_redirect_kern.o' + +RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter) +IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding) + +function config_common { + local tun_type=$1 + + $IP netns add ns1 + $IP netns add ns2 + $IP link add ve1 type veth peer name vens1 + $IP link add ve2 type veth peer name vens2 + $IP link set dev ve1 up + $IP link set dev ve2 up + $IP link set dev ve1 mtu 1500 + $IP link set dev ve2 mtu 1500 + $IP link set dev vens1 netns ns1 + $IP link set dev vens2 netns ns2 + + $IP -n ns1 link set dev lo up + $IP -n ns1 link set dev vens1 up + $IP -n ns1 addr add 10.1.1.101/24 dev vens1 + $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad + $IP -n ns1 route add default via 10.1.1.1 dev vens1 + $IP -n ns1 route add default via 2401:db01::1 dev vens1 + + $IP -n ns2 link set dev lo up + $IP -n ns2 link set dev vens2 up + $IP -n ns2 addr add 10.2.1.102/24 dev vens2 + $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad + $IP -n ns2 addr add 10.10.1.102 dev lo + $IP -n ns2 addr add 2401:face::66/64 dev lo nodad + $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1 + $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1 + $IP -n ns2 link set dev ipt2 up + $IP -n ns2 link set dev ip6t2 up + $IP netns exec ns2 $TC qdisc add dev vens2 clsact + $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip + if [[ $tun_type == "ipip" ]]; then + $IP -n ns2 route add 10.1.1.0/24 dev ipt2 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0 + else + $IP -n ns2 route add 10.1.1.0/24 dev ip6t2 + $IP -n ns2 route add 2401:db01::/64 dev ip6t2 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 + $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0 + fi + + $IP addr add 10.1.1.1/24 dev ve1 + $IP addr add 2401:db01::1/64 dev ve1 nodad + $IP addr add 10.2.1.1/24 dev ve2 + $IP addr add 2401:db02::1/64 dev ve2 nodad + + $TC qdisc add dev ve2 clsact + $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward + + sysctl -q -w net.ipv4.conf.all.rp_filter=0 + sysctl -q -w net.ipv6.conf.all.forwarding=1 +} + 
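# A typical invocation of this script (assuming the BPF objects have been
# built in samples/bpf and the script is run as root) looks roughly like:
#
#   ./tc_l2_redirect.sh                     # both tests, ingress and egress
#   ./tc_l2_redirect.sh l2_to_ipip egress   # a single test and direction
#   TC=/usr/sbin/tc IP=/usr/sbin/ip ./tc_l2_redirect.sh
#
# The positional-argument handling at the bottom of the script selects which
# test names and directions are run.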
+function cleanup { + set +e + [[ -z $DEBUG ]] || set +x + $IP netns delete ns1 >& /dev/null + $IP netns delete ns2 >& /dev/null + $IP link del ve1 >& /dev/null + $IP link del ve2 >& /dev/null + $IP link del ipt >& /dev/null + $IP link del ip6t >& /dev/null + sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER + sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING + rm -f /sys/fs/bpf/tc/globals/tun_iface + [[ -z $DEBUG ]] || set -x + set -e +} + +function l2_to_ipip { + echo -n "l2_to_ipip $1: " + + local dir=$1 + + config_common ipip + + $IP link add ipt type ipip external + $IP link set dev ipt up + sysctl -q -w net.ipv4.conf.ipt.rp_filter=0 + sysctl -q -w net.ipv4.conf.ipt.forwarding=1 + + if [[ $dir == "egress" ]]; then + $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 + $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect + sysctl -q -w net.ipv4.conf.ve1.forwarding=1 + else + $TC qdisc add dev ve1 clsact + $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect + fi + + $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex) + + $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null + + if [[ $dir == "egress" ]]; then + # test direct egress to ve2 (i.e. not forwarding from + # ve1 to ve2). + ping -c1 10.10.1.102 >& /dev/null + fi + + cleanup + + echo "OK" +} + +function l2_to_ip6tnl { + echo -n "l2_to_ip6tnl $1: " + + local dir=$1 + + config_common ip6tnl + + $IP link add ip6t type ip6tnl mode any external + $IP link set dev ip6t up + sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0 + sysctl -q -w net.ipv4.conf.ip6t.forwarding=1 + + if [[ $dir == "egress" ]]; then + $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 + $IP route add 2401:face::/64 via 2401:db02::66 dev ve2 + $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect + sysctl -q -w net.ipv4.conf.ve1.forwarding=1 + else + $TC qdisc add dev ve1 clsact + $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect + fi + + $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex) + + $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null + $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null + + if [[ $dir == "egress" ]]; then + # test direct egress to ve2 (i.e. not forwarding from + # ve1 to ve2). + ping -c1 10.10.1.102 >& /dev/null + ping -6 -c1 2401:face::66 >& /dev/null + fi + + cleanup + + echo "OK" +} + +cleanup +test_names="l2_to_ipip l2_to_ip6tnl" +test_dirs="ingress egress" +if [[ $# -ge 2 ]]; then + test_names=$1 + test_dirs=$2 +elif [[ $# -ge 1 ]]; then + test_names=$1 +fi + +for t in $test_names; do + for d in $test_dirs; do + $t $d + done +done diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c new file mode 100644 index 000000000000..92a44729dbe4 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_kern.c @@ -0,0 +1,236 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +#define _htonl __builtin_bswap32 + +#define PIN_GLOBAL_NS 2 +struct bpf_elf_map { + __u32 type; + __u32 size_key; + __u32 size_value; + __u32 max_elem; + __u32 flags; + __u32 id; + __u32 pinning; +}; + +/* copy of 'struct ethhdr' without __packed */ +struct eth_hdr { + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + unsigned short h_proto; +}; + +struct bpf_elf_map SEC("maps") tun_iface = { + .type = BPF_MAP_TYPE_ARRAY, + .size_key = sizeof(int), + .size_value = sizeof(int), + .pinning = PIN_GLOBAL_NS, + .max_elem = 1, +}; + +static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) +{ + if (eth_proto == htons(ETH_P_IP)) + return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); + else if (eth_proto == htons(ETH_P_IPV6)) + return (daddr == _htonl(0x2401face)); + + return false; +} + +SEC("l2_to_iptun_ingress_forward") +int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) +{ + struct bpf_tunnel_key tkey = {}; + void *data = (void *)(long)skb->data; + struct eth_hdr *eth = data; + void *data_end = (void *)(long)skb->data_end; + int key = 0, *ifindex; + + int ret; + + if (data + sizeof(*eth) > data_end) + return TC_ACT_OK; + + ifindex = bpf_map_lookup_elem(&tun_iface, &key); + if (!ifindex) + return TC_ACT_OK; + + if (eth->h_proto == htons(ETH_P_IP)) { + char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; + struct iphdr *iph = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*iph) > data_end) + return TC_ACT_OK; + + if (iph->protocol != IPPROTO_IPIP) + return TC_ACT_OK; + + bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, + _htonl(iph->daddr)); + return bpf_redirect(*ifindex, BPF_F_INGRESS); + } else if (eth->h_proto == htons(ETH_P_IPV6)) { + char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; + struct ipv6hdr *ip6h = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) + return TC_ACT_OK; + + if (ip6h->nexthdr != IPPROTO_IPIP && + ip6h->nexthdr != IPPROTO_IPV6) + return TC_ACT_OK; + + bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, + _htonl(ip6h->daddr.s6_addr32[0]), + _htonl(ip6h->daddr.s6_addr32[3])); + return bpf_redirect(*ifindex, BPF_F_INGRESS); + } + + return TC_ACT_OK; +} + +SEC("l2_to_iptun_ingress_redirect") +int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) +{ + struct bpf_tunnel_key tkey = {}; + void *data = (void *)(long)skb->data; + struct eth_hdr *eth = data; + void *data_end = (void *)(long)skb->data_end; + int key = 0, *ifindex; + + int ret; + + if (data + sizeof(*eth) > data_end) + return TC_ACT_OK; + + ifindex = bpf_map_lookup_elem(&tun_iface, &key); + if (!ifindex) + return TC_ACT_OK; + + if (eth->h_proto == htons(ETH_P_IP)) { + char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; + struct iphdr *iph = data + sizeof(*eth); + __be32 daddr = iph->daddr; + + if (data + sizeof(*eth) + sizeof(*iph) > data_end) + return TC_ACT_OK; + + if (!is_vip_addr(eth->h_proto, daddr)) + return TC_ACT_OK; + + bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); + } else { + return TC_ACT_OK; + } + + tkey.tunnel_id = 10000; + tkey.tunnel_ttl = 64; + tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); + return bpf_redirect(*ifindex, 0); +} + +SEC("l2_to_ip6tun_ingress_redirect") +int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) +{ + struct bpf_tunnel_key tkey = {}; + void *data = (void 
*)(long)skb->data; + struct eth_hdr *eth = data; + void *data_end = (void *)(long)skb->data_end; + int key = 0, *ifindex; + + if (data + sizeof(*eth) > data_end) + return TC_ACT_OK; + + ifindex = bpf_map_lookup_elem(&tun_iface, &key); + if (!ifindex) + return TC_ACT_OK; + + if (eth->h_proto == htons(ETH_P_IP)) { + char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; + struct iphdr *iph = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*iph) > data_end) + return TC_ACT_OK; + + if (!is_vip_addr(eth->h_proto, iph->daddr)) + return TC_ACT_OK; + + bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), + *ifindex); + } else if (eth->h_proto == htons(ETH_P_IPV6)) { + char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; + struct ipv6hdr *ip6h = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) + return TC_ACT_OK; + + if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) + return TC_ACT_OK; + + bpf_trace_printk(fmt6, sizeof(fmt6), + _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); + } else { + return TC_ACT_OK; + } + + tkey.tunnel_id = 10000; + tkey.tunnel_ttl = 64; + /* 2401:db02:0:0:0:0:0:66 */ + tkey.remote_ipv6[0] = _htonl(0x2401db02); + tkey.remote_ipv6[1] = 0; + tkey.remote_ipv6[2] = 0; + tkey.remote_ipv6[3] = _htonl(0x00000066); + bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); + return bpf_redirect(*ifindex, 0); +} + +SEC("drop_non_tun_vip") +int _drop_non_tun_vip(struct __sk_buff *skb) +{ + struct bpf_tunnel_key tkey = {}; + void *data = (void *)(long)skb->data; + struct eth_hdr *eth = data; + void *data_end = (void *)(long)skb->data_end; + + if (data + sizeof(*eth) > data_end) + return TC_ACT_OK; + + if (eth->h_proto == htons(ETH_P_IP)) { + struct iphdr *iph = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*iph) > data_end) + return TC_ACT_OK; + + if (is_vip_addr(eth->h_proto, iph->daddr)) + return TC_ACT_SHOT; + } else if (eth->h_proto == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = data + sizeof(*eth); + + if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) + return TC_ACT_OK; + + if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) + return TC_ACT_SHOT; + } + + return TC_ACT_OK; +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c new file mode 100644 index 000000000000..4013c5337b91 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_user.c @@ -0,0 +1,73 @@ +/* Copyright (c) 2016 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include + +#include +#include +#include +#include +#include + +#include "libbpf.h" + +static void usage(void) +{ + printf("Usage: tc_l2_ipip_redirect [...]\n"); + printf(" -U Update an already pinned BPF array\n"); + printf(" -i Interface index\n"); + printf(" -h Display this help\n"); +} + +int main(int argc, char **argv) +{ + const char *pinned_file = NULL; + int ifindex = -1; + int array_key = 0; + int array_fd = -1; + int ret = -1; + int opt; + + while ((opt = getopt(argc, argv, "F:U:i:")) != -1) { + switch (opt) { + /* General args */ + case 'U': + pinned_file = optarg; + break; + case 'i': + ifindex = atoi(optarg); + break; + default: + usage(); + goto out; + } + } + + if (ifindex < 0 || !pinned_file) { + usage(); + goto out; + } + + array_fd = bpf_obj_get(pinned_file); + if (array_fd < 0) { + fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", + pinned_file, strerror(errno), errno); + goto out; + } + + /* bpf_tunnel_key.remote_ipv4 expects host byte orders */ + ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0); + if (ret) { + perror("bpf_update_elem"); + goto out; + } + +out: + if (array_fd != -1) + close(array_fd); + return ret; +} diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c index 71a8ed32823e..41b6115a32eb 100644 --- a/samples/bpf/trace_event_kern.c +++ b/samples/bpf/trace_event_kern.c @@ -50,7 +50,7 @@ int bpf_prog1(struct bpf_perf_event_data *ctx) key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS); if ((int)key.kernstack < 0 && (int)key.userstack < 0) { bpf_trace_printk(fmt, sizeof(fmt), cpu, ctx->sample_period, - ctx->regs.ip); + PT_REGS_IP(&ctx->regs)); return 0; } diff --git a/scripts/Makefile.build b/scripts/Makefile.build index de46ab03f063..7675d11ee65e 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -159,7 +159,8 @@ cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $< $(obj)/%.i: $(src)/%.c FORCE $(call if_changed_dep,cpp_i_c) -cmd_gensymtypes = \ +# These mirror gensymtypes_S and co below, keep them in synch. 
+cmd_gensymtypes_c = \ $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ $(GENKSYMS) $(if $(1), -T $(2)) \ $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ @@ -169,7 +170,7 @@ cmd_gensymtypes = \ quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ cmd_cc_symtypes_c = \ set -e; \ - $(call cmd_gensymtypes,true,$@) >/dev/null; \ + $(call cmd_gensymtypes_c,true,$@) >/dev/null; \ test -s $@ || rm -f $@ $(obj)/%.symtypes : $(src)/%.c FORCE @@ -198,9 +199,10 @@ else # the actual value of the checksum generated by genksyms cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< -cmd_modversions = \ + +cmd_modversions_c = \ if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ - $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ + $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ > $(@D)/.tmp_$(@F:.o=.ver); \ \ $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ @@ -268,13 +270,14 @@ endif # CONFIG_STACK_VALIDATION define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ $(call cmd_and_fixdep,cc_o_c) \ - $(cmd_modversions) \ + $(cmd_modversions_c) \ $(cmd_objtool) \ $(call echo-cmd,record_mcount) $(cmd_record_mcount) endef define rule_as_o_S $(call cmd_and_fixdep,as_o_S) \ + $(cmd_modversions_S) \ $(cmd_objtool) endef @@ -314,6 +317,39 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL) $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) +# .S file exports must have their C prototypes defined in asm/asm-prototypes.h +# or a file that it includes, in order to get versioned symbols. We build a +# dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from +# the .S file (with trailing ';'), and run genksyms on that, to extract vers. +# +# This is convoluted. The .S file must first be preprocessed to run guards and +# expand names, then the resulting exports must be constructed into plain +# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed +# to make the genksyms input. +# +# These mirror gensymtypes_c and co above, keep them in synch. +cmd_gensymtypes_S = \ + (echo "\#include " ; \ + echo "\#include " ; \ + $(CPP) $(a_flags) $< | \ + grep "\<___EXPORT_SYMBOL\>" | \ + sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ + $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ + $(GENKSYMS) $(if $(1), -T $(2)) \ + $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ + $(if $(KBUILD_PRESERVE),-p) \ + -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) + +quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@ +cmd_cc_symtypes_S = \ + set -e; \ + $(call cmd_gensymtypes_S,true,$@) >/dev/null; \ + test -s $@ || rm -f $@ + +$(obj)/%.symtypes : $(src)/%.S FORCE + $(call cmd,cc_symtypes_S) + + quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< @@ -321,7 +357,37 @@ $(obj)/%.s: $(src)/%.S FORCE $(call if_changed_dep,cpp_s_S) quiet_cmd_as_o_S = AS $(quiet_modtag) $@ -cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< + +ifndef CONFIG_MODVERSIONS +cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< + +else + +ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) + +ifeq ($(ASM_PROTOTYPES),) +cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< + +else + +# versioning matches the C process described above, with difference that +# we parse asm-prototypes.h C header to get function definitions. 
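# As a rough illustration of what this machinery expects (the symbol name is
# only an example): an assembly file that exports a symbol, e.g.
#
#   ENTRY(memcpy)
#           ...
#   ENDPROC(memcpy)
#   EXPORT_SYMBOL(memcpy)
#
# needs a matching C declaration reachable from asm/asm-prototypes.h, e.g.
#
#   #include <asm/string.h>    /* declares memcpy() */
#
# so that the preprocessed dummy C file described above gives genksyms a
# prototype to compute a CRC from.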
+ +cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $< + +cmd_modversions_S = \ + if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ + $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ + > $(@D)/.tmp_$(@F:.o=.ver); \ + \ + $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ + -T $(@D)/.tmp_$(@F:.o=.ver); \ + rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ + else \ + mv -f $(@D)/.tmp_$(@F) $@; \ + fi; +endif +endif $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE $(call if_changed_rule,as_o_S) @@ -430,6 +496,9 @@ cmd_export_list = $(OBJDUMP) -h $< | \ $(obj)/lib-ksyms.o: $(lib-target) FORCE $(call if_changed,export_list) + +targets += $(obj)/lib-ksyms.o + endif # diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 973e8c141567..17867e723a51 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh @@ -1,6 +1,6 @@ #!/bin/sh -echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" +echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" if [ "$?" -eq "0" ] ; then echo y else diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile index ebced77deb9c..90a091b6ae4d 100644 --- a/scripts/kconfig/Makefile +++ b/scripts/kconfig/Makefile @@ -35,6 +35,8 @@ nconfig: $(obj)/nconf silentoldconfig: $(obj)/conf $(Q)mkdir -p include/config include/generated + $(Q)test -e include/generated/autoksyms.h || \ + touch include/generated/autoksyms.h $< $(silent) --$@ $(Kconfig) localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index fc3036b34e51..a4d90aa1045a 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) /* released below */ cred = get_current_cred(); cxt = cred_cxt(cred); - profile = aa_cred_profile(cred); - previous_profile = cxt->previous; + profile = aa_get_newest_profile(aa_cred_profile(cred)); + previous_profile = aa_get_newest_profile(cxt->previous); if (unconfined(profile)) { info = "unconfined"; @@ -718,6 +718,8 @@ audit: out: aa_put_profile(hat); kfree(name); + aa_put_profile(profile); + aa_put_profile(previous_profile); put_cred(cred); return error; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f909dd8b7b8..ea81c08ddc7a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6907,8 +6907,6 @@ static const struct hda_fixup alc662_fixups[] = { .v.pins = (const struct hda_pintbl[]) { { 0x15, 0x40f000f0 }, /* disabled */ { 0x16, 0x40f000f0 }, /* disabled */ - { 0x18, 0x01014011 }, /* LO */ - { 0x1a, 0x01014012 }, /* LO */ { } } }, diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 6a23302297c9..4d9d320a7971 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c @@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int); static bool is_thinkpad(struct hda_codec *codec) { return (codec->core.subsystem_id >> 16 == 0x17aa) && - (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068")); + (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") || + acpi_dev_found("IBM0068")); } static void update_tpacpi_mute_led(void *private_data, int enabled) diff --git 
a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index 07000f53db44..b392e51de94d 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -75,6 +75,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) data->i2s_port = cpu_dai->driver->id; runtime->private_data = data; + dma_ch = 0; if (v->alloc_dma_channel) dma_ch = v->alloc_dma_channel(drvdata, dir); if (dma_ch < 0) diff --git a/sound/sparc/dbri.c b/sound/sparc/dbri.c index 0190cb6332f2..3fe4468ea2c5 100644 --- a/sound/sparc/dbri.c +++ b/sound/sparc/dbri.c @@ -304,7 +304,7 @@ struct snd_dbri { spinlock_t lock; struct dbri_dma *dma; /* Pointer to our DMA block */ - u32 dma_dvma; /* DBRI visible DMA address */ + dma_addr_t dma_dvma; /* DBRI visible DMA address */ void __iomem *regs; /* dbri HW regs */ int dbri_irqp; /* intr queue pointer */ @@ -657,12 +657,14 @@ static void dbri_cmdwait(struct snd_dbri *dbri) */ static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len) { + u32 dvma_addr = (u32)dbri->dma_dvma; + /* Space for 2 WAIT cmds (replaced later by 1 JUMP cmd) */ len += 2; spin_lock(&dbri->cmdlock); if (dbri->cmdptr - dbri->dma->cmd + len < DBRI_NO_CMDS - 2) return dbri->cmdptr + 2; - else if (len < sbus_readl(dbri->regs + REG8) - dbri->dma_dvma) + else if (len < sbus_readl(dbri->regs + REG8) - dvma_addr) return dbri->dma->cmd; else printk(KERN_ERR "DBRI: no space for commands."); @@ -680,6 +682,7 @@ static s32 *dbri_cmdlock(struct snd_dbri *dbri, int len) */ static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len) { + u32 dvma_addr = (u32)dbri->dma_dvma; s32 tmp, addr; static int wait_id = 0; @@ -689,7 +692,7 @@ static void dbri_cmdsend(struct snd_dbri *dbri, s32 *cmd, int len) *(cmd+1) = DBRI_CMD(D_WAIT, 1, wait_id); /* Replace the last command with JUMP */ - addr = dbri->dma_dvma + (cmd - len - dbri->dma->cmd) * sizeof(s32); + addr = dvma_addr + (cmd - len - dbri->dma->cmd) * sizeof(s32); *(dbri->cmdptr+1) = addr; *(dbri->cmdptr) = DBRI_CMD(D_JUMP, 0, 0); @@ -747,6 +750,7 @@ static void dbri_reset(struct snd_dbri *dbri) /* Lock must not be held before calling this */ static void dbri_initialize(struct snd_dbri *dbri) { + u32 dvma_addr = (u32)dbri->dma_dvma; s32 *cmd; u32 dma_addr; unsigned long flags; @@ -764,7 +768,7 @@ static void dbri_initialize(struct snd_dbri *dbri) /* * Initialize the interrupt ring buffer. 
*/ - dma_addr = dbri->dma_dvma + dbri_dma_off(intr, 0); + dma_addr = dvma_addr + dbri_dma_off(intr, 0); dbri->dma->intr[0] = dma_addr; dbri->dbri_irqp = 1; /* @@ -778,7 +782,7 @@ static void dbri_initialize(struct snd_dbri *dbri) dbri->cmdptr = cmd; *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); *(cmd++) = DBRI_CMD(D_WAIT, 1, 0); - dma_addr = dbri->dma_dvma + dbri_dma_off(cmd, 0); + dma_addr = dvma_addr + dbri_dma_off(cmd, 0); sbus_writel(dma_addr, dbri->regs + REG8); spin_unlock(&dbri->cmdlock); @@ -1077,6 +1081,7 @@ static void recv_fixed(struct snd_dbri *dbri, int pipe, volatile __u32 *ptr) static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period) { struct dbri_streaminfo *info = &dbri->stream_info[streamno]; + u32 dvma_addr = (u32)dbri->dma_dvma; __u32 dvma_buffer; int desc; int len; @@ -1177,7 +1182,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period) else { dbri->next_desc[last_desc] = desc; dbri->dma->desc[last_desc].nda = - dbri->dma_dvma + dbri_dma_off(desc, desc); + dvma_addr + dbri_dma_off(desc, desc); } last_desc = desc; @@ -1192,7 +1197,7 @@ static int setup_descs(struct snd_dbri *dbri, int streamno, unsigned int period) } dbri->dma->desc[last_desc].nda = - dbri->dma_dvma + dbri_dma_off(desc, first_desc); + dvma_addr + dbri_dma_off(desc, first_desc); dbri->next_desc[last_desc] = first_desc; dbri->pipes[info->pipe].first_desc = first_desc; dbri->pipes[info->pipe].desc = first_desc; @@ -1697,6 +1702,7 @@ interrupts are disabled. static void xmit_descs(struct snd_dbri *dbri) { struct dbri_streaminfo *info; + u32 dvma_addr = (u32)dbri->dma_dvma; s32 *cmd; unsigned long flags; int first_td; @@ -1718,7 +1724,7 @@ static void xmit_descs(struct snd_dbri *dbri) *(cmd++) = DBRI_CMD(D_SDP, 0, dbri->pipes[info->pipe].sdp | D_SDP_P | D_SDP_EVERY | D_SDP_C); - *(cmd++) = dbri->dma_dvma + + *(cmd++) = dvma_addr + dbri_dma_off(desc, first_td); dbri_cmdsend(dbri, cmd, 2); @@ -1740,7 +1746,7 @@ static void xmit_descs(struct snd_dbri *dbri) *(cmd++) = DBRI_CMD(D_SDP, 0, dbri->pipes[info->pipe].sdp | D_SDP_P | D_SDP_EVERY | D_SDP_C); - *(cmd++) = dbri->dma_dvma + + *(cmd++) = dvma_addr + dbri_dma_off(desc, first_td); dbri_cmdsend(dbri, cmd, 2); @@ -2539,7 +2545,7 @@ static int snd_dbri_create(struct snd_card *card, if (!dbri->dma) return -ENOMEM; - dprintk(D_GEN, "DMA Cmd Block 0x%p (0x%08x)\n", + dprintk(D_GEN, "DMA Cmd Block 0x%p (%pad)\n", dbri->dma, dbri->dma_dvma); /* Map the registers into memory. 
*/ diff --git a/sound/usb/card.c b/sound/usb/card.c index 9e5276d6dda0..2ddc034673a8 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip) snd_usb_endpoint_free(ep); mutex_destroy(&chip->mutex); - dev_set_drvdata(&chip->dev->dev, NULL); + if (!atomic_read(&chip->shutdown)) + dev_set_drvdata(&chip->dev->dev, NULL); kfree(chip); return 0; } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 4ffff7be9299..a53fef0c673b 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, } if (first) { - ui_browser__printf(&browser->b, "%c", folded_sign); - width--; + ui_browser__printf(&browser->b, "%c ", folded_sign); + width -= 2; first = false; } else { ui_browser__printf(&browser->b, " "); @@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, width -= hpp.buf - s; } - ui_browser__write_nstring(&browser->b, "", hierarchy_indent); - width -= hierarchy_indent; + if (!first) { + ui_browser__write_nstring(&browser->b, "", hierarchy_indent); + width -= hierarchy_indent; + } if (column >= browser->b.horiz_scroll) { char s[2048]; @@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, } perf_hpp_list__for_each_format(entry->hpp_list, fmt) { - ui_browser__write_nstring(&browser->b, "", 2); + if (first) { + ui_browser__printf(&browser->b, "%c ", folded_sign); + first = false; + } else { + ui_browser__write_nstring(&browser->b, "", 2); + } + width -= 2; /* @@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows int indent = hists->nr_hpp_node - 2; bool first_node, first_col; - ret = scnprintf(buf, size, " "); + ret = scnprintf(buf, size, " "); if (advance_hpp_check(&dummy_hpp, ret)) return ret; + first_node = true; /* the first hpp_list_node is for overhead columns */ fmt_node = list_first_entry(&hists->hpp_formats, struct perf_hpp_list_node, list); @@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); if (advance_hpp_check(&dummy_hpp, ret)) break; + + first_node = false; } - ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", - indent * HIERARCHY_INDENT, ""); - if (advance_hpp_check(&dummy_hpp, ret)) - return ret; + if (!first_node) { + ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", + indent * HIERARCHY_INDENT, ""); + if (advance_hpp_check(&dummy_hpp, ret)) + return ret; + } first_node = true; list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { @@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser, browser->b.use_navkeypressed = true; browser->show_headers = symbol_conf.show_hist_headers; - hists__for_each_format(hists, fmt) + if (symbol_conf.report_hierarchy) { + struct perf_hpp_list_node *fmt_node; + + /* count overhead columns (in the first node) */ + fmt_node = list_first_entry(&hists->hpp_formats, + struct perf_hpp_list_node, list); + perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) + ++browser->b.columns; + + /* add a single column for whole hierarchy sort keys*/ ++browser->b.columns; + } else { + hists__for_each_format(hists, fmt) + ++browser->b.columns; + } hists__reset_column_width(hists); } diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b02992efb513..a69f027368ef 100644 
--- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists, if (prog) ui_progress__update(prog, 1); + hists->nr_entries++; + if (!he->filtered) { + hists->nr_non_filtered_entries++; + hists__calc_col_len(hists, he); + } + if (!he->leaf) { hists__hierarchy_output_resort(hists, prog, &he->hroot_in, &he->hroot_out, min_callchain_hits, use_callchain); - hists->nr_entries++; - if (!he->filtered) { - hists->nr_non_filtered_entries++; - hists__calc_col_len(hists, he); - } - continue; } diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index a538ff44b108..a1883bbb0144 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -8,18 +8,19 @@ # as published by the Free Software Foundation; version 2 # of the License. -include ../../../../scripts/Makefile.include +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(shell pwd))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +#$(info Determined 'srctree' to be $(srctree)) +endif -OUTPUT=./ +include $(srctree)/../../scripts/Makefile.include + +OUTPUT=$(srctree)/ ifeq ("$(origin O)", "command line") - OUTPUT := $(O)/ -endif - -ifneq ($(OUTPUT),) -# check that the output directory actually exists -OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) -$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) + OUTPUT := $(O)/power/acpi/ endif +#$(info Determined 'OUTPUT' to be $(OUTPUT)) # --- CONFIGURATION BEGIN --- @@ -70,8 +71,8 @@ WARNINGS := -Wall WARNINGS += $(call cc-supports,-Wstrict-prototypes) WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) -KERNEL_INCLUDE := ../../../include -ACPICA_INCLUDE := ../../../drivers/acpi/acpica +KERNEL_INCLUDE := $(OUTPUT)include +ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) CFLAGS += $(WARNINGS) diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules index ec87a9e562c0..373738338f51 100644 --- a/tools/power/acpi/Makefile.rules +++ b/tools/power/acpi/Makefile.rules @@ -8,28 +8,42 @@ # as published by the Free Software Foundation; version 2 # of the License. 
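# With the reworked rules in this hunk, tool objects are collected under
# $(OUTPUT)tools/$(TOOL)/ instead of the current directory; for example,
# building acpidump with O=/tmp/out places the objects in
# /tmp/out/power/acpi/tools/acpidump/ and the linked binary in
# /tmp/out/power/acpi/ (OUTPUT is set to $(O)/power/acpi/ by Makefile.config
# when O= is given on the command line).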
-$(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE - $(ECHO) " LD " $@ - $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@ +objdir := $(OUTPUT)tools/$(TOOL)/ +toolobjs := $(addprefix $(objdir),$(TOOL_OBJS)) +$(OUTPUT)$(TOOL): $(toolobjs) FORCE + $(ECHO) " LD " $(subst $(OUTPUT),,$@) + $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@ + $(ECHO) " STRIP " $(subst $(OUTPUT),,$@) $(QUIET) $(STRIPCMD) $@ -$(OUTPUT)%.o: %.c - $(ECHO) " CC " $@ +$(KERNEL_INCLUDE): + $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@) + $(QUIET) mkdir -p $(KERNEL_INCLUDE) + $(ECHO) " CP " $(subst $(OUTPUT),,$@) + $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/ + +$(objdir)%.o: %.c $(KERNEL_INCLUDE) + $(ECHO) " CC " $(subst $(OUTPUT),,$@) $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< all: $(OUTPUT)$(TOOL) clean: - -find $(OUTPUT) \( -not -type d \) \ - -and \( -name '*~' -o -name '*.[oas]' \) \ - -type f -print \ - | xargs rm -f - -rm -f $(OUTPUT)$(TOOL) + $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir)) + $(QUIET) find $(objdir) \( -not -type d \)\ + -and \( -name '*~' -o -name '*.[oas]' \)\ + -type f -print | xargs rm -f + $(ECHO) " RM " $(TOOL) + $(QUIET) rm -f $(OUTPUT)$(TOOL) + $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE)) + $(QUIET) rm -rf $(KERNEL_INCLUDE) install-tools: - $(INSTALL) -d $(DESTDIR)${sbindir} - $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir} + $(ECHO) " INST " $(TOOL) + $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir) + $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir) uninstall-tools: - - rm -f $(DESTDIR)${sbindir}/$(TOOL) + $(ECHO) " UNINST " $(TOOL) + $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL) install: all install-tools $(EXTRA_INSTALL) uninstall: uninstall-tools $(EXTRA_UNINSTALL) diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile index 352df4b41ae9..f2d06e773eb4 100644 --- a/tools/power/acpi/tools/acpidbg/Makefile +++ b/tools/power/acpi/tools/acpidbg/Makefile @@ -17,9 +17,7 @@ vpath %.c \ ../../os_specific/service_layers\ . CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ - -I.\ - -I../../../../../drivers/acpi/acpica\ - -I../../../../../include + -I. LDFLAGS += -lpthread TOOL_OBJS = \ acpidbg.o diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c index a88ac45b7756..4308362d7068 100644 --- a/tools/power/acpi/tools/acpidbg/acpidbg.c +++ b/tools/power/acpi/tools/acpidbg/acpidbg.c @@ -12,10 +12,16 @@ #include /* Headers not included by include/acpi/platform/aclinux.h */ +#include +#include +#include +#include +#include #include #include #include -#include +#include +#include "../../../../../include/linux/circ_buf.h" #define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" #define ACPI_AML_SEC_TICK 1 diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile index 04b5db7c7c0b..f7c7af1f9258 100644 --- a/tools/power/acpi/tools/acpidump/Makefile +++ b/tools/power/acpi/tools/acpidump/Makefile @@ -19,9 +19,7 @@ vpath %.c \ ./\ ../../common\ ../../os_specific/service_layers -CFLAGS += -DACPI_DUMP_APP -I.\ - -I../../../../../drivers/acpi/acpica\ - -I../../../../../include +CFLAGS += -DACPI_DUMP_APP -I. 
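# (The long relative -I paths are dropped here because Makefile.config already
# passes -I$(KERNEL_INCLUDE) and -I$(ACPICA_INCLUDE) in CFLAGS, and the
# reworked Makefile.rules copies the ACPI headers into $(OUTPUT)include
# before any object is compiled.)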
TOOL_OBJS = \ apdump.o\ apfiles.o\ @@ -49,7 +47,9 @@ TOOL_OBJS = \ include ../../Makefile.rules -install-man: ../../man/acpidump.8 - $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8 +install-man: $(srctree)/man/acpidump.8 + $(ECHO) " INST " acpidump.8 + $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8 uninstall-man: - - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 + $(ECHO) " UNINST " acpidump.8 + $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8 diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 6e9c40eea208..69ccce308458 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c @@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) continue; type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) & ARMV8_PMU_EVTYPE_EVENT; - if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) && (enable & BIT(i))) { reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; reg = lower_32_bits(reg); @@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, eventsel = data & ARMV8_PMU_EVTYPE_EVENT; /* Software increment event does't need to be backed by a perf event */ - if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) + if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR && + select_idx != ARMV8_PMU_CYCLE_IDX) return; memset(&attr, 0, sizeof(struct perf_event_attr)); @@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; attr.exclude_hv = 1; /* Don't count EL2 events */ attr.exclude_host = 1; /* Don't count host events */ - attr.config = eventsel; + attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ? + ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel; counter = kvm_pmu_get_counter_value(vcpu, select_idx); /* The initial sample period (overflow count) of an event. 
*/ diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 0a063af40565..9bab86757fa4 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -50,8 +50,10 @@ void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu) WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE); - kvm_notify_acked_irq(vcpu->kvm, 0, - intid - VGIC_NR_PRIVATE_IRQS); + /* Only SPIs require notification */ + if (vgic_valid_spi(vcpu->kvm, intid)) + kvm_notify_acked_irq(vcpu->kvm, 0, + intid - VGIC_NR_PRIVATE_IRQS); } } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 9f0dae397d9c..5c9f9745e6ca 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -41,8 +41,10 @@ void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu) WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE); - kvm_notify_acked_irq(vcpu->kvm, 0, - intid - VGIC_NR_PRIVATE_IRQS); + /* Only SPIs require notification */ + if (vgic_valid_spi(vcpu->kvm, intid)) + kvm_notify_acked_irq(vcpu->kvm, 0, + intid - VGIC_NR_PRIVATE_IRQS); } /* diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 8035cc1eb955..efeceb0a222d 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work) spin_lock(&vcpu->async_pf.lock); list_add_tail(&apf->link, &vcpu->async_pf.done); + apf->vcpu = NULL; spin_unlock(&vcpu->async_pf.lock); /* @@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work) void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) { + spin_lock(&vcpu->async_pf.lock); + /* cancel outstanding work queue item */ while (!list_empty(&vcpu->async_pf.queue)) { struct kvm_async_pf *work = @@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) typeof(*work), queue); list_del(&work->queue); + /* + * We know it's present in vcpu->async_pf.done, do + * nothing here. + */ + if (!work->vcpu) + continue; + + spin_unlock(&vcpu->async_pf.lock); #ifdef CONFIG_KVM_ASYNC_PF_SYNC flush_work(&work->work); #else @@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) kmem_cache_free(async_pf_cache, work); } #endif + spin_lock(&vcpu->async_pf.lock); } - spin_lock(&vcpu->async_pf.lock); while (!list_empty(&vcpu->async_pf.done)) { struct kvm_async_pf *work = list_first_entry(&vcpu->async_pf.done, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5c360347a1e9..7f9ee2929cfe 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2889,10 +2889,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm, ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); if (ret < 0) { - ops->destroy(dev); mutex_lock(&kvm->lock); list_del(&dev->vm_node); mutex_unlock(&kvm->lock); + ops->destroy(dev); return ret; }
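/*
 * Note on the kvm_ioctl_create_device() change above: ops->destroy() is
 * expected to free the device, so the error path must unlink it from the
 * kvm->devices list while 'dev' is still valid.  Roughly:
 *
 *	mutex_lock(&kvm->lock);
 *	list_del(&dev->vm_node);
 *	mutex_unlock(&kvm->lock);
 *	ops->destroy(dev);
 *
 * rather than calling destroy() first and then touching the freed 'dev'.
 */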