commit c99f7abf0e
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	include/net/inetpeer.h
	net/ipv6/output_core.c

Changes in net were fixing bugs in code removed in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -79,7 +79,7 @@
   <partintro>
     <para>
       This first part of the DRM Developer's Guide documents core DRM code,
-      helper libraries for writting drivers and generic userspace interfaces
+      helper libraries for writing drivers and generic userspace interfaces
       exposed by DRM drivers.
     </para>
   </partintro>
@@ -459,7 +459,7 @@ char *date;</synopsis>
       providing a solution to every graphics memory-related problems, GEM
       identified common code between drivers and created a support library to
       share it. GEM has simpler initialization and execution requirements than
-      TTM, but has no video RAM management capabitilies and is thus limited to
+      TTM, but has no video RAM management capabilities and is thus limited to
       UMA devices.
     </para>
     <sect2>
@@ -889,7 +889,7 @@ int (*prime_fd_to_handle)(struct drm_device *dev,
       vice versa. Drivers must use the kernel dma-buf buffer sharing framework
       to manage the PRIME file descriptors. Similar to the mode setting
       API PRIME is agnostic to the underlying buffer object manager, as
-      long as handles are 32bit unsinged integers.
+      long as handles are 32bit unsigned integers.
     </para>
     <para>
       While non-GEM drivers must implement the operations themselves, GEM
@@ -2356,7 +2356,7 @@ void intel_crt_init(struct drm_device *dev)
     first create properties and then create and associate individual instances
     of those properties to objects. A property can be instantiated multiple
     times and associated with different objects. Values are stored in property
-    instances, and all other property information are stored in the propery
+    instances, and all other property information are stored in the property
     and shared between all instances of the property.
   </para>
   <para>
@@ -2697,10 +2697,10 @@ int num_ioctls;</synopsis>
   <sect1>
     <title>Legacy Support Code</title>
     <para>
-      The section very brievely covers some of the old legacy support code which
+      The section very briefly covers some of the old legacy support code which
      is only used by old DRM drivers which have done a so-called shadow-attach
      to the underlying device instead of registering as a real driver. This
-      also includes some of the old generic buffer mangement and command
+      also includes some of the old generic buffer management and command
      submission code. Do not use any of this in new and modern drivers.
     </para>
@@ -25,9 +25,11 @@ using data transfer rates in the order of 10MB/s or more.
 With most FireWire controllers, memory access is limited to the low 4 GB
 of physical address space.  This can be a problem on IA64 machines where
 memory is located mostly above that limit, but it is rarely a problem on
-more common hardware such as x86, x86-64 and PowerPC.  However, at least
-Agere/LSI FW643e and FW643e2 controllers are known to support access to
-physical addresses above 4 GB.
+more common hardware such as x86, x86-64 and PowerPC.
+
+At least LSI FW643e and FW643e2 controllers are known to support access to
+physical addresses above 4 GB, but this feature is currently not enabled by
+Linux.
 
 Together with a early initialization of the OHCI-1394 controller for debugging,
 this facility proved most useful for examining long debugs logs in the printk
@@ -101,8 +103,9 @@ Step-by-step instructions for using firescope with early OHCI initialization:
    compliant, they are based on TI PCILynx chips and require drivers for Win-
    dows operating systems.
 
-   The mentioned kernel log message contains ">4 GB phys DMA" in case of
-   OHCI-1394 controllers which support accesses above this limit.
+   The mentioned kernel log message contains the string "physUB" if the
+   controller implements a writable Physical Upper Bound register.  This is
+   required for physical DMA above 4 GB (but not utilized by Linux yet).
 
 2) Establish a working FireWire cable connection:
@@ -309,7 +309,10 @@ ii) Status
     error_if_no_space|queue_if_no_space
 	If the pool runs out of data or metadata space, the pool will
 	either queue or error the IO destined to the data device.  The
-	default is to queue the IO until more space is added.
+	default is to queue the IO until more space is added or the
+	'no_space_timeout' expires.  The 'no_space_timeout' dm-thin-pool
+	module parameter can be used to change this timeout -- it
+	defaults to 60 seconds but may be disabled using a value of 0.

iii) Messages
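The 'no_space_timeout' knob described above is an ordinary module parameter. As a rough sketch of how such a parameter is declared (names and permissions here are assumptions, not the actual dm-thin-pool source):

	#include <linux/module.h>

	/* Sketch only: illustrative declaration of a tunable timeout. */
	static unsigned int no_space_timeout_secs = 60;	/* 0 disables the timeout */
	module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
	MODULE_PARM_DESC(no_space_timeout,
		"Out-of-data-space timeout in seconds (0 = wait indefinitely)");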
@@ -201,20 +201,15 @@ To beat some sense out of the internal editor, do this:
 
 - Edit your Thunderbird config settings so that it won't use format=flowed.
   Go to "edit->preferences->advanced->config editor" to bring up the
-  thunderbird's registry editor, and set "mailnews.send_plaintext_flowed" to
-  "false".
+  thunderbird's registry editor.
 
-- Disable HTML Format: Set "mail.identity.id1.compose_html" to "false".
+- Set "mailnews.send_plaintext_flowed" to "false"
 
-- Enable "preformat" mode: Set "editor.quotesPreformatted" to "true".
+- Set "mailnews.wraplength" from "72" to "0"
 
-- Enable UTF8: Set "prefs.converted-to-utf8" to "true".
+- "View" > "Message Body As" > "Plain Text"
 
-- Install the "toggle wordwrap" extension.  Download the file from:
-    https://addons.mozilla.org/thunderbird/addon/2351/
-  Then go to "tools->add ons", select "install" at the bottom of the screen,
-  and browse to where you saved the .xul file.  This adds an "Enable
-  Wordwrap" entry under the Options menu of the message composer.
+- "View" > "Character Encoding" > "Unicode (UTF-8)"
 
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
@@ -1245,8 +1245,9 @@ second).  The meanings of the columns are as follows, from left to right:
 
 The "intr" line gives counts of interrupts serviced since boot time, for each
 of the possible system interrupts.  The first column is the total of all
-interrupts serviced; each subsequent column is the total for that particular
-interrupt.
+interrupts serviced including unnumbered architecture specific interrupts;
+each subsequent column is the total for that particular numbered interrupt.
+Unnumbered interrupts are not shown, only summed into the total.
 
 The "ctxt" line gives the total number of context switches across all CPUs.
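A small user-space sketch of consuming the total described above (the first field after "intr" is the grand total, which includes the unnumbered interrupts):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *f = fopen("/proc/stat", "r");
		char tag[16];
		unsigned long long total;

		if (!f)
			return 1;
		while (fscanf(f, "%15s", tag) == 1) {
			if (strcmp(tag, "intr") == 0 && fscanf(f, "%llu", &total) == 1) {
				printf("interrupts since boot: %llu\n", total);
				break;
			}
			if (fscanf(f, "%*[^\n]") == EOF)	/* skip rest of line */
				break;
		}
		fclose(f);
		return 0;
	}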
@@ -327,6 +327,13 @@ temp[1-*]_max_hyst
 		from the max value.
 		RW
 
+temp[1-*]_min_hyst
+		Temperature hysteresis value for min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the min value.
+		RW
+
 temp[1-*]_input	Temperature input value.
 		Unit: millidegree Celsius
 		RO
@@ -362,6 +369,13 @@ temp[1-*]_lcrit	Temperature critical min value, typically lower than
 		Unit: millidegree Celsius
 		RW
 
+temp[1-*]_lcrit_hyst
+		Temperature hysteresis value for critical min limit.
+		Unit: millidegree Celsius
+		Must be reported as an absolute temperature, NOT a delta
+		from the critical min value.
+		RW
+
 temp[1-*]_offset
 		Temperature offset which is added to the temperature reading
 		by the chip.
@@ -188,6 +188,9 @@ shift
 #define CP_METHODREF 10
 #define CP_INTERFACEMETHODREF 11
 #define CP_NAMEANDTYPE 12
+#define CP_METHODHANDLE 15
+#define CP_METHODTYPE 16
+#define CP_INVOKEDYNAMIC 18
 
 /* Define some commonly used error messages */
 
@@ -242,14 +245,19 @@ void skip_constant(FILE *classfile, u_int16_t *cur)
 		break;
 	case CP_CLASS:
 	case CP_STRING:
+	case CP_METHODTYPE:
 		seekerr = fseek(classfile, 2, SEEK_CUR);
 		break;
+	case CP_METHODHANDLE:
+		seekerr = fseek(classfile, 3, SEEK_CUR);
+		break;
 	case CP_INTEGER:
 	case CP_FLOAT:
 	case CP_FIELDREF:
 	case CP_METHODREF:
 	case CP_INTERFACEMETHODREF:
 	case CP_NAMEANDTYPE:
+	case CP_INVOKEDYNAMIC:
 		seekerr = fseek(classfile, 4, SEEK_CUR);
 		break;
	case CP_LONG:
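For reference, the three tags added above are the constant-pool entry kinds introduced for invokedynamic; their payload sizes after the one-byte tag (per the JVM class file specification) are exactly what the new fseek() offsets skip:

	/* Payload bytes following the tag byte (JVM spec, section 4.4). */
	static const struct { int tag; int payload; } cp_payload[] = {
		{ 15, 3 },	/* CP_METHODHANDLE: reference_kind (1) + reference_index (2) */
		{ 16, 2 },	/* CP_METHODTYPE: descriptor_index (2) */
		{ 18, 4 },	/* CP_INVOKEDYNAMIC: bootstrap index (2) + name_and_type (2) */
	};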
@@ -2126,7 +2126,7 @@ into the hash PTE second double word).
 4.75 KVM_IRQFD
 
 Capability: KVM_CAP_IRQFD
-Architectures: x86
+Architectures: x86 s390
 Type: vm ioctl
 Parameters: struct kvm_irqfd (in)
 Returns: 0 on success, -1 on error
MAINTAINERS (11 changed lines)
@@ -3158,10 +3158,9 @@ S:	Maintained
 F:	drivers/scsi/eata_pio.*
 
 EBTABLES
-M:	Bart De Schuymer <bart.de.schuymer@pandora.be>
 L:	netfilter-devel@vger.kernel.org
 W:	http://ebtables.sourceforge.net/
-S:	Maintained
+S:	Orphan
 F:	include/linux/netfilter_bridge/ebt_*.h
 F:	include/uapi/linux/netfilter_bridge/ebt_*.h
 F:	net/bridge/netfilter/ebt*.c
@@ -7410,6 +7409,14 @@ F:	drivers/rpmsg/
 F:	Documentation/rpmsg.txt
 F:	include/linux/rpmsg.h
 
+RESET CONTROLLER FRAMEWORK
+M:	Philipp Zabel <p.zabel@pengutronix.de>
+S:	Maintained
+F:	drivers/reset/
+F:	Documentation/devicetree/bindings/reset/
+F:	include/linux/reset.h
+F:	include/linux/reset-controller.h
+
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc8
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -99,7 +99,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
@@ -110,7 +110,7 @@
 			pcie@3,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x1800 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
@@ -131,7 +131,7 @@
 			pcie@4,0 {
 				device_type = "pci";
 				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-				reg = <0x1000 0 0 0 0>;
+				reg = <0x2000 0 0 0 0>;
 				#address-cells = <3>;
 				#size-cells = <2>;
 				#interrupt-cells = <1>;
@@ -641,7 +641,7 @@
 			trigger@3 {
 				reg = <3>;
 				trigger-name = "external";
-				trigger-value = <0x13>;
+				trigger-value = <0xd>;
 				trigger-external;
 			};
 		};
@@ -503,7 +503,7 @@
 	status = "okay";
 
 	ak8975@0c {
-		compatible = "ak,ak8975";
+		compatible = "asahi-kasei,ak8975";
 		reg = <0x0c>;
 		gpios = <&gpj0 7 0>;
 	};
@@ -107,6 +107,7 @@
 				regulator-name = "VDD_IOPERI_1.8V";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 				op_mode = <1>;
 			};
 
@@ -364,16 +364,4 @@
 			gpio-key,wakeup;
 		};
 	};
-
-	amba {
-		mdma1: mdma@11C10000 {
-			/*
-			 * MDMA1 can support both secure and non-secure
-			 * AXI transactions. When this is enabled in the kernel
-			 * for boards that run in secure mode, we are getting
-			 * imprecise external aborts causing the kernel to oops.
-			 */
-			status = "disabled";
-		};
-	};
 };
@@ -219,16 +219,6 @@
 			reg = <0x100440C0 0x20>;
 		};
 
-		mau_pd: power-domain@100440E0 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x100440E0 0x20>;
-		};
-
-		g2d_pd: power-domain@10044100 {
-			compatible = "samsung,exynos4210-pd";
-			reg = <0x10044100 0x20>;
-		};
-
 		msc_pd: power-domain@10044120 {
 			compatible = "samsung,exynos4210-pd";
 			reg = <0x10044120 0x20>;
@@ -336,6 +326,13 @@
 			#dma-cells = <1>;
 			#dma-channels = <8>;
 			#dma-requests = <1>;
+			/*
+			 * MDMA1 can support both secure and non-secure
+			 * AXI transactions. When this is enabled in the kernel
+			 * for boards that run in secure mode, we are getting
+			 * imprecise external aborts causing the kernel to oops.
+			 */
+			status = "disabled";
 		};
 	};
 
@@ -385,7 +382,7 @@
 	spi_0: spi@12d20000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d20000 0x100>;
-		interrupts = <0 66 0>;
+		interrupts = <0 68 0>;
 		dmas = <&pdma0 5
 			&pdma0 4>;
 		dma-names = "tx", "rx";
@@ -401,7 +398,7 @@
 	spi_1: spi@12d30000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d30000 0x100>;
-		interrupts = <0 67 0>;
+		interrupts = <0 69 0>;
 		dmas = <&pdma1 5
 			&pdma1 4>;
 		dma-names = "tx", "rx";
@@ -417,7 +414,7 @@
 	spi_2: spi@12d40000 {
 		compatible = "samsung,exynos4210-spi";
 		reg = <0x12d40000 0x100>;
-		interrupts = <0 68 0>;
+		interrupts = <0 70 0>;
 		dmas = <&pdma0 7
 			&pdma0 6>;
 		dma-names = "tx", "rx";
@@ -730,6 +727,5 @@
 		interrupts = <0 112 0>;
 		clocks = <&clock 471>;
 		clock-names = "secss";
-		samsung,power-domain = <&g2d_pd>;
 	};
 };
@@ -433,8 +433,12 @@ static void bL_switcher_restore_cpus(void)
 {
 	int i;
 
-	for_each_cpu(i, &bL_switcher_removed_logical_cpus)
-		cpu_up(i);
+	for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
+		struct device *cpu_dev = get_cpu_device(i);
+		int ret = device_online(cpu_dev);
+		if (ret)
+			dev_err(cpu_dev, "switcher: unable to restore CPU\n");
+	}
 }
 
 static int bL_switcher_halve_cpus(void)
@@ -521,7 +525,7 @@ static int bL_switcher_halve_cpus(void)
 			continue;
 		}
 
-		ret = cpu_down(i);
+		ret = device_offline(get_cpu_device(i));
 		if (ret) {
 			bL_switcher_restore_cpus();
 			return ret;
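device_online()/device_offline() route hotplug through the CPU device model, so the sysfs "online" attribute stays in sync with what the switcher did, unlike raw cpu_up()/cpu_down(). A minimal sketch of the pattern (error handling trimmed; the caller is assumed to satisfy the device-hotplug locking rules):

	#include <linux/cpu.h>
	#include <linux/device.h>

	/* Sketch: bring one logical CPU back via the device model. */
	static int restore_one_cpu(int cpu)
	{
		struct device *cpu_dev = get_cpu_device(cpu);

		if (!cpu_dev)
			return -ENODEV;
		return device_online(cpu_dev);	/* instead of raw cpu_up(cpu) */
	}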
@@ -65,6 +65,7 @@ CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=y
+CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_S3C2410=y
 CONFIG_DEBUG_GPIO=y
 # CONFIG_HWMON is not set
@@ -54,7 +54,9 @@ static inline void register_trusted_foundations(
 	 */
 	pr_err("No support for Trusted Foundations, continuing in degraded mode.\n");
 	pr_err("Secondary processors as well as CPU PM will be disabled.\n");
+#if IS_ENABLED(CONFIG_SMP)
 	setup_max_cpus = 0;
+#endif
 	cpu_idle_poll_ctrl(true);
 }
 
@@ -171,8 +171,9 @@ extern int __put_user_8(void *, unsigned long long);
 #define __put_user_check(x,p)						\
 	({								\
 		unsigned long __limit = current_thread_info()->addr_limit - 1; \
+		const typeof(*(p)) __user *__tmp_p = (p);		\
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
-		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
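The extra __tmp_p temporary forces the pointer expression to be evaluated exactly once, before x is bound to a fixed register. A reduced user-space sketch of the single-evaluation property (not the kernel macro itself):

	#include <stdio.h>

	#define PUT_ONCE(x, p)				\
		({					\
			typeof(p) __tmp_p = (p);	/* p evaluated once, first */ \
			*__tmp_p = (x);			\
			0;				\
		})

	int main(void)
	{
		int v = 0, *q = &v;

		PUT_ONCE(42, q++);		/* q advances exactly once */
		printf("%d %td\n", v, q - &v);	/* prints "42 1" */
		return 0;
	}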
@@ -132,6 +132,10 @@
 	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
 	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
 
+	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
+	@ unpredictable
+	bic	r4, #1
+
 	@ write basic exception frame
 	stmdb	r2!, {r1, r3-r5}
 	ldmia	sp, {r1, r3-r5}
@@ -285,7 +285,7 @@ static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
 		if (unwind_pop_register(ctrl, &vsp, reg))
 			return -URC_FAILURE;
 
-	if (insn & 0x80)
+	if (insn & 0x8)
 		if (unwind_pop_register(ctrl, &vsp, 14))
 			return -URC_FAILURE;
 
@@ -1308,19 +1308,19 @@ static struct platform_device at91_adc_device = {
 static struct at91_adc_trigger at91_adc_triggers[] = {
 	[0] = {
 		.name = "timer-counter-0",
-		.value = AT91_ADC_TRGSEL_TC0 | AT91_ADC_TRGEN,
+		.value = 0x1,
 	},
 	[1] = {
 		.name = "timer-counter-1",
-		.value = AT91_ADC_TRGSEL_TC1 | AT91_ADC_TRGEN,
+		.value = 0x3,
 	},
 	[2] = {
 		.name = "timer-counter-2",
-		.value = AT91_ADC_TRGSEL_TC2 | AT91_ADC_TRGEN,
+		.value = 0x5,
 	},
 	[3] = {
 		.name = "external",
-		.value = AT91_ADC_TRGSEL_EXTERNAL | AT91_ADC_TRGEN,
+		.value = 0xd,
 		.is_external = true,
 	},
 };
@@ -18,6 +18,8 @@
 
 #include <mach/map.h>
 
+#include <plat/cpu.h>
+
 #include "smc.h"
 
 static int exynos_do_idle(void)
@@ -28,13 +30,24 @@ static int exynos_do_idle(void)
 
 static int exynos_cpu_boot(int cpu)
 {
+	/*
+	 * The second parameter of SMC_CMD_CPU1BOOT command means CPU id.
+	 * But, Exynos4212 has only one secondary CPU so second parameter
+	 * isn't used for informing secure firmware about CPU id.
+	 */
+	if (soc_is_exynos4212())
+		cpu = 0;
+
 	exynos_smc(SMC_CMD_CPU1BOOT, cpu, 0, 0);
 	return 0;
 }
 
 static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 {
-	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c + 4*cpu;
+	void __iomem *boot_reg = S5P_VA_SYSRAM_NS + 0x1c;
+
+	if (!soc_is_exynos4212())
+		boot_reg += 4*cpu;
 
 	__raw_writel(boot_addr, boot_reg);
 	return 0;
@@ -77,7 +77,7 @@ struct platform_device *__init imx_alloc_mx3_camera(
 
 	pdev = platform_device_alloc("mx3-camera", 0);
 	if (!pdev)
-		goto err;
+		return ERR_PTR(-ENOMEM);
 
 	pdev->dev.dma_mask = kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
 	if (!pdev->dev.dma_mask)
@@ -108,7 +108,18 @@ static int __init mvebu_soc_id_init(void)
 	iounmap(pci_base);
 
 res_ioremap:
-	clk_disable_unprepare(clk);
+	/*
+	 * If the PCIe unit is actually enabled and we have PCI
+	 * support in the kernel, we intentionally do not release the
+	 * reference to the clock. We want to keep it running since
+	 * the bootloader does some PCIe link configuration that the
+	 * kernel is for now unable to do, and gating the clock would
+	 * make us loose this precious configuration.
+	 */
+	if (!of_device_is_available(child) || !IS_ENABLED(CONFIG_PCI_MVEBU)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
 
 clk_err:
 	of_node_put(child);
@@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs,
 	board_nand_data.nr_parts	= nr_parts;
 	board_nand_data.devsize		= nand_type;
 
-	board_nand_data.ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+	board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW;
 	gpmc_nand_init(&board_nand_data, gpmc_t);
 }
 #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
@@ -456,7 +456,8 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = {
 	.clkdm_name	= "dpll4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops);
+DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
+			dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
 
 static struct clk dpll4_m5x2_ck_3630 = {
 	.name		= "dpll4_m5x2_ck",
@@ -14,6 +14,7 @@
 #include <linux/cpuidle.h>
 #include <linux/cpu_pm.h>
 #include <linux/export.h>
+#include <linux/clockchips.h>
 
 #include <asm/cpuidle.h>
 #include <asm/proc-fns.h>
@@ -83,6 +84,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 {
 	struct idle_statedata *cx = state_ptr + index;
 	u32 mpuss_can_lose_context = 0;
+	int cpu_id = smp_processor_id();
 
 	/*
 	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
@@ -110,6 +112,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
 				 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
@@ -165,6 +169,8 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	if (dev->cpu == 0 && mpuss_can_lose_context)
 		cpu_cluster_pm_exit();
 
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 	cpu_done[dev->cpu] = false;
@@ -172,6 +178,16 @@ fail:
 	return index;
 }
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+	int cpu = smp_processor_id();
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 static struct cpuidle_driver omap4_idle_driver = {
 	.name				= "omap4_idle",
 	.owner				= THIS_MODULE,
@@ -189,8 +205,7 @@ static struct cpuidle_driver omap4_idle_driver = {
 		/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 		.exit_latency = 328 + 440,
 		.target_residency = 960,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
 		.enter = omap_enter_idle_coupled,
 		.name = "C2",
 		.desc = "CPUx OFF, MPUSS CSWR",
@@ -199,8 +214,7 @@ static struct cpuidle_driver omap4_idle_driver = {
 		/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 		.exit_latency = 460 + 518,
 		.target_residency = 1100,
-		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
-			 CPUIDLE_FLAG_TIMER_STOP,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
 		.enter = omap_enter_idle_coupled,
 		.name = "C3",
 		.desc = "CPUx OFF, MPUSS OSWR",
@@ -231,5 +245,8 @@ int __init omap4_idle_init(void)
 	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
+	/* Configure the broadcast timer on each cpu */
+	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
 	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
 }
@@ -895,7 +895,7 @@ static struct omap_hwmod omap54xx_mcpdm_hwmod = {
 	 * current exception.
 	 */
 
-	.flags		= HWMOD_EXT_OPT_MAIN_CLK,
+	.flags		= HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE,
 	.main_clk	= "pad_clks_ck",
 	.prcm = {
 		.omap4 = {
@@ -123,6 +123,11 @@ __v7m_setup:
 	mov	pc, lr
 ENDPROC(__v7m_setup)
 
+	.align 2
+__v7m_setup_stack:
+	.space	4 * 8				@ 8 registers
+__v7m_setup_stack_top:
+
 	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
 
 	.section ".rodata"
@@ -152,6 +157,3 @@ __v7m_proc_info:
 	.long	nop_cache_fns		@ proc_info_list.cache
 	.size	__v7m_proc_info, . - __v7m_proc_info
 
-__v7m_setup_stack:
-	.space	4 * 8				@ 8 registers
-__v7m_setup_stack_top:
@@ -70,6 +70,7 @@ static u32 errata;
 
 static struct omap_dma_global_context_registers {
 	u32 dma_irqenable_l0;
+	u32 dma_irqenable_l1;
 	u32 dma_ocp_sysconfig;
 	u32 dma_gcr;
 } omap_dma_global_context;
@@ -1973,10 +1974,17 @@ static struct irqaction omap24xx_dma_irq;
 
 /*----------------------------------------------------------------------------*/
 
+/*
+ * Note that we are currently using only IRQENABLE_L0 and L1.
+ * As the DSP may be using IRQENABLE_L2 and L3, let's not
+ * touch those for now.
+ */
 void omap_dma_global_context_save(void)
 {
 	omap_dma_global_context.dma_irqenable_l0 =
 		p->dma_read(IRQENABLE_L0, 0);
+	omap_dma_global_context.dma_irqenable_l1 =
+		p->dma_read(IRQENABLE_L1, 0);
 	omap_dma_global_context.dma_ocp_sysconfig =
 		p->dma_read(OCP_SYSCONFIG, 0);
 	omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
@@ -1991,6 +1999,8 @@ void omap_dma_global_context_restore(void)
 			OCP_SYSCONFIG, 0);
 	p->dma_write(omap_dma_global_context.dma_irqenable_l0,
 		IRQENABLE_L0, 0);
+	p->dma_write(omap_dma_global_context.dma_irqenable_l1,
+		IRQENABLE_L1, 0);
 
 	if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
 		p->dma_write(0x3 , IRQSTATUS_L0, 0);
@@ -266,7 +266,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 
 #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
-#define set_pmd_at(mm, addr, pmdp, pmd)	set_pmd(pmdp, pmd)
+#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
 
 static inline int has_transparent_hugepage(void)
 {
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA)	+= $(call cc-option,-march=rm5200,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_RM7000)	+= $(call cc-option,-march=rm7000,-march=r5000) \
 			-Wa,--trap
-cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1)	+= $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
 			-Wa,--trap
 cflags-$(CONFIG_CPU_R8000)	+= -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000)	+= $(call cc-option,-march=r10000,-march=r8000) \
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX	0x00000020	/* Physically indexed cache */
 
 struct cpuinfo_mips {
-	unsigned int		udelay_val;
-	unsigned int		asid_cache;
+	unsigned long		asid_cache;
 
 	/*
 	 * Capability and feature descriptor structure for MIPS CPU
 	 */
 	unsigned long		options;
 	unsigned long		ases;
+	unsigned int		udelay_val;
 	unsigned int		processor_id;
 	unsigned int		fpu_id;
 	unsigned int		msa_id;
@@ -381,7 +381,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		350
+#define __NR_O32_Linux_syscalls		351
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -710,7 +710,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		310
+#define __NR_64_Linux_syscalls		311
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -1043,6 +1043,6 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		314
+#define __NR_N32_Linux_syscalls		315
 
 #endif /* _UAPI_ASM_UNISTD_H */
@@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
+			if (insn.i_format.opcode == beql_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bnel_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == blezl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bgtzl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
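The fix compares the major opcode field rather than rt, which only holds a register number. A compilable sketch of the field layout (the beql opcode value follows the MIPS encoding; the rest is illustrative, not kernel definitions):

	#include <stdint.h>
	#include <stdio.h>

	#define OPCODE(insn)	(((insn) >> 26) & 0x3f)	/* bits 31..26 */
	#define RT(insn)	(((insn) >> 16) & 0x1f)	/* bits 20..16: a register number */

	#define BEQL_OP		0x14	/* beql major opcode */

	static int is_branch_likely(uint32_t insn)
	{
		return OPCODE(insn) == BEQL_OP;	/* compare the opcode field, never rt */
	}

	int main(void)
	{
		/* beql $1, $2, +0x10 */
		uint32_t insn = ((uint32_t)BEQL_OP << 26) | (1 << 21) | (2 << 16) | 0x10;

		printf("opcode=0x%x rt=%u likely=%d\n",
		       OPCODE(insn), RT(insn), is_branch_likely(insn));
		return 0;
	}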
@@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child,
 	enum pt_watch_style style;
 	int i;
 
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 		return -EIO;
 	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
 		return -EIO;
@@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child,
 #endif
 
 	__put_user(style, &addr->style);
-	__put_user(current_cpu_data.watch_reg_use_cnt,
+	__put_user(boot_cpu_data.watch_reg_use_cnt,
 		   &addr->WATCH_STYLE.num_valid);
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		__put_user(child->thread.watch.mips3264.watchlo[i],
 			   &addr->WATCH_STYLE.watchlo[i]);
 		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
 			   &addr->WATCH_STYLE.watchhi[i]);
-		__put_user(current_cpu_data.watch_reg_masks[i],
+		__put_user(boot_cpu_data.watch_reg_masks[i],
 			   &addr->WATCH_STYLE.watch_masks[i]);
 	}
 	for (; i < 8; i++) {
@@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
 	unsigned long lt[NUM_WATCH_REGS];
 	u16 ht[NUM_WATCH_REGS];
 
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 		return -EIO;
 	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
 		return -EIO;
 	/* Check the values. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 #ifdef CONFIG_32BIT
 		if (lt[i] & __UA_LIMIT)
@@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child,
 			return -EINVAL;
 	}
 	/* Install them. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		if (lt[i] & 7)
 			watch_active = 1;
 		child->thread.watch.mips3264.watchlo[i] = lt[i];
@@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void)
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
 	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
 			reg_val & (1<<29) ? "ED " : "",
 			reg_val & (1<<28) ? "ET " : "",
@@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void)
 
 	/* For the moment, report the problem and hang. */
 	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 		       read_c0_ecc());
 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
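Both hunks fix the same operator slip: logical && reduces its operands to 0 or 1, so the company-ID field was never actually masked out. A user-space demonstration (the PRID values are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int processor_id = 0x019300;	/* illustrative PRID */
		unsigned int company_mips = 0x010000;	/* illustrative PRID_COMP_MIPS */

		/* (id && 0xff0000) is 1 for any non-zero id: comparison always fails */
		printf("&&: %d\n", (processor_id && 0xff0000) == company_mips);
		/* (id & 0xff0000) keeps the company field: comparison works */
		printf("& : %d\n", (processor_id & 0xff0000) == company_mips);
		return 0;
	}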
@@ -27,8 +27,7 @@
 
 #include <cs5536/cs5536_mfgpt.h>
 
-DEFINE_SPINLOCK(mfgpt_lock);
-EXPORT_SYMBOL(mfgpt_lock);
+static DEFINE_RAW_SPINLOCK(mfgpt_lock);
 
 static u32 mfgpt_base;
 
@@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter);
 static void init_mfgpt_timer(enum clock_event_mode mode,
 			     struct clock_event_device *evt)
 {
-	spin_lock(&mfgpt_lock);
+	raw_spin_lock(&mfgpt_lock);
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
@@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode,
 		/* Nothing to do here */
 		break;
 	}
-	spin_unlock(&mfgpt_lock);
+	raw_spin_unlock(&mfgpt_lock);
 }
 
 static struct clock_event_device mfgpt_clockevent = {
@@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 	static int old_count;
 	static u32 old_jifs;
 
-	spin_lock_irqsave(&mfgpt_lock, flags);
+	raw_spin_lock_irqsave(&mfgpt_lock, flags);
 	/*
 	 * Although our caller may have the read side of xtime_lock,
 	 * this is now a seqlock, and we are cheating in this routine
@@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 	old_count = count;
 	old_jifs = jifs;
 
-	spin_unlock_irqrestore(&mfgpt_lock, flags);
+	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 
 	return (cycle_t) (jifs * COMPARE) + count;
 }
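On PREEMPT_RT ordinary spinlocks become sleeping locks, which is not acceptable around clockevent/clocksource register accesses; a raw spinlock stays a true busy lock. A minimal sketch of the idiom (register offsets are placeholders):

	#include <linux/io.h>
	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(hw_lock);	/* never becomes a sleeping lock */

	static void program_timer(void __iomem *base, u32 cmp)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&hw_lock, flags);
		writel(cmp, base + 0x08);	/* compare register (offset assumed) */
		writel(1, base + 0x0c);		/* enable bit (offset assumed) */
		raw_spin_unlock_irqrestore(&hw_lock, flags);
	}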
@@ -273,7 +273,7 @@ void build_clear_page(void)
 		uasm_i_ori(&buf, A2, A0, off);
 
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
 	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
 				* cache_line_size : 0;
@@ -424,7 +424,7 @@ void build_copy_page(void)
 		uasm_i_ori(&buf, A2, A0, off);
 
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 
 	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
 				cache_line_size : 0;
@@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L;
 fw_memblock_t * __init fw_getmdesc(int eva)
 {
 	char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
-	unsigned long memsize, ememsize __maybe_unused = 0;
+	unsigned long memsize = 0, ememsize __maybe_unused = 0;
 	static char cmdline[COMMAND_LINE_SIZE] __initdata;
 	int tmp;
 
@@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = {
 	.start = 0x50000000,
 	.end = 0x5FFFFFFF,
 	.flags = IORESOURCE_MEM,
-	.parent = &rc32434_res_pci_mem1,
 	.sibling = NULL,
 	.child = &rc32434_res_pci_mem2
 };
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS	+= -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS	+= -Iarch/$(ARCH)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP		= $(CC) -E $(KBUILD_CFLAGS)
@@ -318,11 +318,16 @@ n:
 	addi	reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)		\
 	lis     reg,(expr)@highest;		\
 	ori     reg,reg,(expr)@higher;		\
 	rldicr  reg,reg,32,31;			\
-	oris    reg,reg,(expr)@h;		\
+	oris    reg,reg,(expr)@__AS_ATHIGH;	\
 	ori     reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)			\
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		357
+#define __NR_syscalls		358
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
@@ -379,5 +379,6 @@
 #define __NR_kcmp		354
 #define __NR_sched_setattr	355
 #define __NR_sched_getattr	356
+#define __NR_renameat2		357
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
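Wiring a syscall needs all three pieces above to agree: the table entry, the NR count, and the uapi number. Before libc wrappers existed, renameat2 was reachable from user space only via syscall(2); a sketch using the powerpc number added here (other architectures use different numbers):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_renameat2
	#define __NR_renameat2 357	/* powerpc value from this diff */
	#endif

	int main(void)
	{
		long ret = syscall(__NR_renameat2, AT_FDCWD, "a", AT_FDCWD, "b", 0);

		if (ret < 0)
			perror("renameat2");
		return ret < 0;
	}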
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN		0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
 		if (!cpu_online(cpu)) {
 			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
 			       cpu);
-			cpu_up(cpu);
+			WARN_ON(cpu_up(cpu));
 		}
 	}
 }
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
 	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 	if (r)
 		return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	r = kvmppc_book3s_init_pr();
 #endif
 	return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kvmppc_book3s_exit_pr();
 #endif
 	kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			pte_size = psize;
 			pte = lookup_linux_pte_and_update(pgdir, hva, writing,
 							  &pte_size);
-			if (pte_present(pte)) {
+			if (pte_present(pte) && !pte_numa(pte)) {
 				if (writing && !pte_write(pte))
 					/* make the actual HPTE be read-only */
 					ptel = hpte_make_readonly(ptel);
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mr	r3, r9
 	bl	kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+	/* Turn on TM. */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	ld	r5, VCPU_MSR(r9)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	1f	/* TM not active in guest. */
+
+	li	r3, TM_CAUSE_KVM_RESCHED
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* All GPRs are volatile at this point. */
+	TRECLAIM(R3)
+
+	/* Temporarily store r13 and r9 so we have some regs to play with */
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r9, PACATMSCRATCH(r13)
+	ld	r9, HSTATE_KVM_VCPU(r13)
+
+	/* Get a few more GPRs free. */
+	std	r29, VCPU_GPRS_TM(29)(r9)
+	std	r30, VCPU_GPRS_TM(30)(r9)
+	std	r31, VCPU_GPRS_TM(31)(r9)
+
+	/* Save away PPR and DSCR soon so don't run with user values. */
+	mfspr	r31, SPRN_PPR
+	HMT_MEDIUM
+	mfspr	r30, SPRN_DSCR
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+
+	/* Save all but r9, r13 & r29-r31 */
+	reg = 0
+	.rept	29
+	.if (reg != 9) && (reg != 13)
+	std	reg, VCPU_GPRS_TM(reg)(r9)
+	.endif
+	reg = reg + 1
+	.endr
+	/* ... now save r13 */
+	GET_SCRATCH0(r4)
+	std	r4, VCPU_GPRS_TM(13)(r9)
+	/* ... and save r9 */
+	ld	r4, PACATMSCRATCH(r13)
+	std	r4, VCPU_GPRS_TM(9)(r9)
+
+	/* Reload stack pointer and TOC. */
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATOC(r13)
+
+	/* Set MSR RI now we have r1 and r13 back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+
+	/* Save away checkpinted SPRs. */
+	std	r31, VCPU_PPR_TM(r9)
+	std	r30, VCPU_DSCR_TM(r9)
+	mflr	r5
+	mfcr	r6
+	mfctr	r7
+	mfspr	r8, SPRN_AMR
+	mfspr	r10, SPRN_TAR
+	std	r5, VCPU_LR_TM(r9)
+	stw	r6, VCPU_CR_TM(r9)
+	std	r7, VCPU_CTR_TM(r9)
+	std	r8, VCPU_AMR_TM(r9)
+	std	r10, VCPU_TAR_TM(r9)
+
+	/* Restore r12 as trap number. */
+	lwz	r12, VCPU_TRAP(r9)
+
+	/* Save FP/VSX. */
+	addi	r3, r9, VCPU_FPRS_TM
+	bl	.store_fp_state
+	addi	r3, r9, VCPU_VRS_TM
+	bl	.store_vr_state
+	mfspr	r6, SPRN_VRSAVE
+	stw	r6, VCPU_VRSAVE_TM(r9)
+1:
+	/*
+	 * We need to save these SPRs after the treclaim so that the software
+	 * error code is recorded correctly in the TEXASR. Also the user may
+	 * change these outside of a transaction, so they must always be
+	 * context switched.
+	 */
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+2:
+#endif
+
 	/* Increment yield count if they have a VPA */
 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
 	cmpdi	r8, 0
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto free_vcpu;
 	vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	vcpu->arch.shadow_vcpu =
 		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
 	if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 #endif
 	vfree(vcpu_book3s);
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
 		/*
 		 * If relocatable, check if it overlaps interrupt vectors that
 		 * are copied down to real 0. For relocatable kernel
@@ -158,6 +158,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_ONE_REG:
 	case KVM_CAP_ENABLE_CAP:
 	case KVM_CAP_S390_CSS_SUPPORT:
+	case KVM_CAP_IRQFD:
 	case KVM_CAP_IOEVENTFD:
 	case KVM_CAP_DEVICE_CTRL:
 	case KVM_CAP_ENABLE_CAP_VM:
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
-#define THREAD_SIZE_ORDER	1
+#define THREAD_SIZE_ORDER	2
 #define THREAD_SIZE  (PAGE_SIZE << THREAD_SIZE_ORDER)
 #define CURRENT_MASK (~(THREAD_SIZE - 1))
 
@@ -7778,7 +7778,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	exec_control = vmcs12->pin_based_vm_exec_control;
 	exec_control |= vmcs_config.pin_based_exec_ctrl;
-	exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+	exec_control &= ~(PIN_BASED_VMX_PREEMPTION_TIMER |
+			  PIN_BASED_POSTED_INTR);
 	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
 
 	vmx->nested.preemption_timer_expired = false;
@@ -7815,7 +7816,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		if (!vmx->rdtscp_enabled)
 			exec_control &= ~SECONDARY_EXEC_RDTSCP;
 		/* Take the following fields only from vmcs12 */
-		exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+				  SECONDARY_EXEC_APIC_REGISTER_VIRT);
 		if (nested_cpu_has(vmcs12,
 				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
 			exec_control |= vmcs12->secondary_vm_exec_control;
@@ -106,6 +106,8 @@ EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
 static u32 tsc_tolerance_ppm = 250;
 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 
+static bool backwards_tsc_observed = false;
+
 #define KVM_NR_SHARED_MSRS 16
 
 struct kvm_shared_msrs_global {
@@ -1486,7 +1488,8 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
 					&ka->master_kernel_ns,
 					&ka->master_cycle_now);
 
-	ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
+	ka->use_master_clock = host_tsc_clocksource && vcpus_matched
+				&& !backwards_tsc_observed;
 
 	if (ka->use_master_clock)
 		atomic_set(&kvm_guest_has_master_clock, 1);
@@ -6945,6 +6948,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	 */
 	if (backwards_tsc) {
 		u64 delta_cyc = max_tsc - local_tsc;
+		backwards_tsc_observed = true;
 		list_for_each_entry(kvm, &vm_list, vm_list) {
 			kvm_for_each_vcpu(i, vcpu, kvm) {
 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
@@ -155,6 +155,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	unsigned long addr;
 	int ret = 0;
 	struct vm_area_struct *vma;
+	static struct page *no_pages[] = {NULL};
 
 #ifdef CONFIG_X86_X32_ABI
 	if (test_thread_flag(TIF_X32))
@@ -193,7 +194,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 			addr - VDSO_OFFSET(VDSO_PREV_PAGES),
 			VDSO_OFFSET(VDSO_PREV_PAGES),
 			VM_READ,
-			NULL);
+			no_pages);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
@@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void)
 
 static void __exit acpi_thermal_exit(void)
 {
-	destroy_workqueue(acpi_thermal_pm_queue);
 	acpi_bus_unregister_driver(&acpi_thermal_driver);
+	destroy_workqueue(acpi_thermal_pm_queue);
 
 	return;
 }
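The reorder enforces the usual teardown rule: stop everything that can still queue work before destroying the workqueue it targets. A schematic of the rule (unregister_producer is hypothetical):

	#include <linux/workqueue.h>

	extern void unregister_producer(void);	/* hypothetical: stops all queuing */

	static struct workqueue_struct *pm_wq;

	static void example_exit(void)
	{
		unregister_producer();		/* no new work can arrive after this */
		destroy_workqueue(pm_wq);	/* now safe: nothing queues to pm_wq */
	}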
@@ -4224,10 +4224,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 	/* devices that don't properly handle queued TRIM commands */
-	{ "Micron_M500*",		"MU0[1-4]*",	ATA_HORKAGE_NO_NCQ_TRIM, },
-	{ "Crucial_CT???M500SSD*",	"MU0[1-4]*",	ATA_HORKAGE_NO_NCQ_TRIM, },
-	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
-	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M500SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Micron_M550*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
+	{ "Crucial_CT???M550SSD*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * Some WD SATA-I drives spin up and down erratically when the link
@@ -144,11 +144,11 @@ static void virtblk_done(struct virtqueue *vq)
 		if (unlikely(virtqueue_is_broken(vq)))
 			break;
 	} while (!virtqueue_enable_cb(vq));
-	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 
 	/* In case queue is stopped waiting for more buffers. */
 	if (req_done)
 		blk_mq_start_stopped_hw_queues(vblk->disk->queue);
+	spin_unlock_irqrestore(&vblk->vq_lock, flags);
 }
 
 static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
@@ -202,8 +202,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
 	err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num);
 	if (err) {
 		virtqueue_kick(vblk->vq);
-		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		blk_mq_stop_hw_queue(hctx);
+		spin_unlock_irqrestore(&vblk->vq_lock, flags);
 		/* Out of mem doesn't actually happen, since we fall back
 		 * to direct descriptors */
 		if (err == -ENOMEM || err == -ENOSPC)
@@ -147,7 +147,7 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
 static int _round_up_table(const struct clk_div_table *table, int div)
 {
 	const struct clk_div_table *clkt;
-	int up = _get_table_maxdiv(table);
+	int up = INT_MAX;
 
 	for (clkt = table; clkt->div; clkt++) {
 		if (clkt->div == div)
@@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name,
 	gate->lock = odf_lock;
 
 	div = kzalloc(sizeof(*div), GFP_KERNEL);
-	if (!div)
+	if (!div) {
+		kfree(gate);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
 	div->reg = reg + pll_data->odf[odf].offset;
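The hunk plugs a leak: once a later allocation fails, every earlier one must be released on the same error path. The shape of the fix, reduced (struct names are stand-ins, not the clk framework types):

	#include <linux/slab.h>

	struct gate_s { int bit_idx; };
	struct div_s { int width; };

	static int register_pair(void)
	{
		struct gate_s *gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		struct div_s *div;

		if (!gate)
			return -ENOMEM;

		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			kfree(gate);	/* release the earlier allocation too */
			return -ENOMEM;
		}

		/* ... hand both off to whatever owns them from here on ... */
		return 0;
	}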
@@ -58,9 +58,9 @@
 #define PLLDU_LFCON_SET_DIVN 600
 
 #define PLLE_BASE_DIVCML_SHIFT 24
-#define PLLE_BASE_DIVCML_WIDTH 4
+#define PLLE_BASE_DIVCML_MASK 0xf
 #define PLLE_BASE_DIVP_SHIFT 16
-#define PLLE_BASE_DIVP_WIDTH 7
+#define PLLE_BASE_DIVP_WIDTH 6
 #define PLLE_BASE_DIVN_SHIFT 8
 #define PLLE_BASE_DIVN_WIDTH 8
 #define PLLE_BASE_DIVM_SHIFT 0
@@ -183,6 +183,14 @@
 #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK :\
 		      mask(p->params->div_nmp->divp_width))
 
+#define divm_shift(p) (p)->params->div_nmp->divm_shift
+#define divn_shift(p) (p)->params->div_nmp->divn_shift
+#define divp_shift(p) (p)->params->div_nmp->divp_shift
+
+#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p))
+#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p))
+#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p))
+
 #define divm_max(p) (divm_mask(p))
 #define divn_max(p) (divn_mask(p))
 #define divp_max(p) (1 << (divp_mask(p)))
@@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll,
 	} else {
 		val = pll_readl_base(pll);
 
-		val &= ~((divm_mask(pll) << div_nmp->divm_shift) |
-			 (divn_mask(pll) << div_nmp->divn_shift) |
-			 (divp_mask(pll) << div_nmp->divp_shift));
+		val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divp_mask_shifted(pll));
 
-		val |= ((cfg->m << div_nmp->divm_shift) |
-			(cfg->n << div_nmp->divn_shift) |
-			(cfg->p << div_nmp->divp_shift));
+		val |= (cfg->m << divm_shift(pll)) |
+		       (cfg->n << divn_shift(pll)) |
+		       (cfg->p << divp_shift(pll));
 
 		pll_writel_base(val, pll);
 	}
@@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw)
 	if (pll->params->flags & TEGRA_PLLE_CONFIGURE) {
 		/* configure dividers */
 		val = pll_readl_base(pll);
-		val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-		val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-		val |= sel.m << pll->params->div_nmp->divm_shift;
-		val |= sel.n << pll->params->div_nmp->divn_shift;
-		val |= sel.p << pll->params->div_nmp->divp_shift;
+		val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+			 divm_mask_shifted(pll));
+		val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+		val |= sel.m << divm_shift(pll);
+		val |= sel.n << divn_shift(pll);
+		val |= sel.p << divp_shift(pll);
 		val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 		pll_writel_base(val, pll);
 	}
@@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw)
 	pll_writel_misc(val, pll);
 
 	val = readl(pll->clk_base + PLLE_SS_CTRL);
+	val &= ~PLLE_SS_COEFFICIENTS_MASK;
 	val |= PLLE_SS_DISABLE;
 	writel(val, pll->clk_base + PLLE_SS_CTRL);
 
-	val |= pll_readl_base(pll);
+	val = pll_readl_base(pll);
 	val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
 	pll_writel_base(val, pll);
 
@@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
 	pll_writel(val, PLLE_SS_CTRL, pll);
 
 	val = pll_readl_base(pll);
-	val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
-	val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
-	val |= sel.m << pll->params->div_nmp->divm_shift;
-	val |= sel.n << pll->params->div_nmp->divn_shift;
+	val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) |
+		 divm_mask_shifted(pll));
+	val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT);
+	val |= sel.m << divm_shift(pll);
+	val |= sel.n << divn_shift(pll);
 	val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
 	pll_writel_base(val, pll);
 	udelay(1);
@@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
 	return clk;
 }
 
+static struct div_nmp pll_e_nmp = {
+	.divn_shift = PLLE_BASE_DIVN_SHIFT,
+	.divn_width = PLLE_BASE_DIVN_WIDTH,
+	.divm_shift = PLLE_BASE_DIVM_SHIFT,
+	.divm_width = PLLE_BASE_DIVM_WIDTH,
+	.divp_shift = PLLE_BASE_DIVP_SHIFT,
+	.divp_width = PLLE_BASE_DIVP_WIDTH,
+};
+
 struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 		void __iomem *clk_base, void __iomem *pmc,
 		unsigned long flags, struct tegra_clk_pll_params *pll_params,
@@ -1420,6 +1439,10 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
 
 	pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS;
 	pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
+
+	if (!pll_params->div_nmp)
+		pll_params->div_nmp = &pll_e_nmp;
+
 	pll = _tegra_init_pll(clk_base, pmc, pll_params, lock);
 	if (IS_ERR(pll))
 		return ERR_CAST(pll);
@@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
 		int m;
 
 		m = _pll_fixed_mdiv(pll_params, parent_rate);
-		val = m << PLL_BASE_DIVM_SHIFT;
-		val |= (pll_params->vco_min / parent_rate)
-				<< PLL_BASE_DIVN_SHIFT;
+		val = m << divm_shift(pll);
+		val |= (pll_params->vco_min / parent_rate) << divn_shift(pll);
 		pll_writel_base(val, pll);
 	}
 
@@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 			|| tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) {
 		__raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
 		__raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-		clk_disable_unprepare(tcd->clk);
+		clk_disable(tcd->clk);
 	}
 
 	switch (m) {
@@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 	 * of oneshot, we get lower overhead and improved accuracy.
 	 */
 	case CLOCK_EVT_MODE_PERIODIC:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);
 
 		/* slow clock, count up to RC, then irq and restart */
 		__raw_writel(timer_clock
@@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
-		clk_prepare_enable(tcd->clk);
+		clk_enable(tcd->clk);
 
 		/* slow clock, count up to RC, then irq and stop */
 		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
@@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
 	ret = clk_prepare_enable(t2_clk);
 	if (ret)
 		return ret;
-	clk_disable_unprepare(t2_clk);
+	clk_disable(t2_clk);
 
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce)

action->dev_id = ce;
BUG_ON(setup_irq(ce->irq, action));
irq_set_affinity(action->irq, cpumask_of(cpu));
irq_force_affinity(action->irq, cpumask_of(cpu));

clockevents_register_device(ce);
return 0;
@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
return -ENOENT;
}

cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
cpu_reg = regulator_get_optional(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
PTR_ERR(cpu_reg));
}

cpu_clk = devm_clk_get(cpu_dev, NULL);
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
goto out_put_reg;
}

ret = of_init_opp_table(cpu_dev);
if (ret) {
pr_err("failed to init OPP table: %d\n", ret);
goto out_put_node;
goto out_put_clk;
}

ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
goto out_put_node;
goto out_put_clk;
}

of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)

out_free_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_clk:
if (!IS_ERR(cpu_clk))
clk_put(cpu_clk);
out_put_reg:
if (!IS_ERR(cpu_reg))
regulator_put(cpu_reg);
out_put_node:
of_node_put(np);
return ret;
@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
break;

case CPUFREQ_GOV_LIMITS:
mutex_lock(&dbs_data->mutex);
if (!cpu_cdbs->cur_policy) {
mutex_unlock(&dbs_data->mutex);
break;
}
mutex_lock(&cpu_cdbs->timer_mutex);
if (policy->max < cpu_cdbs->cur_policy->cur)
__cpufreq_driver_target(cpu_cdbs->cur_policy,
@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->min, CPUFREQ_RELATION_L);
dbs_check_cpu(dbs_data, cpu);
mutex_unlock(&cpu_cdbs->timer_mutex);
mutex_unlock(&dbs_data->mutex);
break;
}
return 0;
@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref)
dma_unmap_page(dev, unmap->addr[i], unmap->len,
DMA_BIDIRECTIONAL);
}
cnt = unmap->map_cnt;
mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
memset(unmap, 0, sizeof(*unmap));
kref_init(&unmap->kref);
unmap->dev = dev;
unmap->map_cnt = nr;

return unmap;
}
@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Disable BLOCK interrupts as well */
channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt,
IRQF_SHARED, "dw_dmac", dw);
if (err)
return err;

/* Create a pool of consistent memory blocks for hardware descriptors */
dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
sizeof(struct dw_desc), 4, 0);
@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)

tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
"dw_dmac", dw);
if (err)
return err;

INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip)
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);

free_irq(chip->irq, dw);
tasklet_kill(&dw->tasklet);

list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,

static void mv_chan_activate(struct mv_xor_chan *chan)
{
u32 activation;

dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
activation = readl_relaxed(XOR_ACTIVATION(chan));
activation |= 0x1;
writel_relaxed(activation, XOR_ACTIVATION(chan));

/* writel ensures all descriptors are flushed before activation */
writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
@ -113,11 +113,9 @@ struct sa11x0_dma_phy {
struct sa11x0_dma_desc *txd_load;
unsigned sg_done;
struct sa11x0_dma_desc *txd_done;
#ifdef CONFIG_PM_SLEEP
u32 dbs[2];
u32 dbt[2];
u32 dcsr;
#endif
};

struct sa11x0_dma_dev {
@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev)
return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sa11x0_dma_suspend(struct device *dev)
{
struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev)

return 0;
}
#endif

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
.suspend_noirq = sa11x0_dma_suspend,
@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation)

#define LOCAL_BUS 0xffc0

/* arbitrarily chosen maximum range for physical DMA: 128 TB */
#define FW_MAX_PHYSICAL_RANGE (128ULL << 40)
/* OHCI-1394's default upper bound for physical DMA: 4 GB */
#define FW_MAX_PHYSICAL_RANGE (1ULL << 32)

void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev,
version >> 16, version & 0xff, ohci->card.index,
ohci->n_ir, ohci->n_it, ohci->quirks,
reg_read(ohci, OHCI1394_PhyUpperBound) ?
", >4 GB phys DMA" : "");
", physUB" : "");

return 0;
@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev)
flush_workqueue(dev_priv->wq);

mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
WARN_ON(dev_priv->mm.aliasing_ppgtt);
|
||||
#define WATCH_LISTS 0
|
||||
#define WATCH_GTT 0
|
||||
|
||||
#define I915_GEM_PHYS_CURSOR_0 1
|
||||
#define I915_GEM_PHYS_CURSOR_1 2
|
||||
#define I915_GEM_PHYS_OVERLAY_REGS 3
|
||||
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
|
||||
|
||||
struct drm_i915_gem_phys_object {
|
||||
int id;
|
||||
struct page **page_list;
|
||||
drm_dma_handle_t *handle;
|
||||
struct drm_i915_gem_object *cur_obj;
|
||||
};
|
||||
|
||||
struct opregion_header;
|
||||
struct opregion_acpi;
|
||||
struct opregion_swsci;
|
||||
@ -1187,9 +1175,6 @@ struct i915_gem_mm {
|
||||
/** Bit 6 swizzling required for Y tiling */
|
||||
uint32_t bit_6_swizzle_y;
|
||||
|
||||
/* storage for physical objects */
|
||||
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
|
||||
|
||||
/* accounting, useful for userland debugging */
|
||||
spinlock_t object_stat_lock;
|
||||
size_t object_memory;
|
||||
@ -1769,7 +1754,7 @@ struct drm_i915_gem_object {
|
||||
struct drm_file *pin_filp;
|
||||
|
||||
/** for phy allocated objects */
|
||||
struct drm_i915_gem_phys_object *phys_obj;
|
||||
drm_dma_handle_t *phys_handle;
|
||||
};
|
||||
|
||||
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
|
||||
@ -2204,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
|
||||
#define PIN_MAPPABLE 0x1
|
||||
#define PIN_NONBLOCK 0x2
|
||||
#define PIN_GLOBAL 0x4
|
||||
#define PIN_OFFSET_BIAS 0x8
|
||||
#define PIN_OFFSET_MASK (~4095)
|
||||
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
|
||||
struct i915_address_space *vm,
|
||||
uint32_t alignment,
|
||||
unsigned flags);
|
||||
uint64_t flags);
|
||||
int __must_check i915_vma_unbind(struct i915_vma *vma);
|
||||
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
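Editor's note, not part of the commit: the two new PIN_OFFSET_* bits above piggyback a minimum GTT offset onto the (now 64-bit) pin flags. Since PIN_OFFSET_MASK is ~4095, bits 0-11 stay free for the control flags and any page-aligned offset can ride in the upper bits of the same word. A minimal sketch of the encoding; the helper name is invented for illustration:

/* Illustrative only: pack a page-aligned minimum offset into the
 * pin flags alongside the low PIN_* control bits. */
static inline uint64_t pin_flags_with_bias(uint64_t flags, uint64_t min_offset)
{
	/* min_offset must be page aligned or it would clobber PIN_* bits */
	return flags | PIN_OFFSET_BIAS | (min_offset & PIN_OFFSET_MASK);
}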
@ -2334,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

@ -2465,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
int min_size,
unsigned alignment,
unsigned cache_level,
unsigned long start,
unsigned long end,
unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
return 0;
}

static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
{
drm_dma_handle_t *phys = obj->phys_handle;

if (!phys)
return;

if (obj->madv == I915_MADV_WILLNEED) {
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr = phys->vaddr;
int i;

for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
kunmap_atomic(dst);

set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
}
vaddr += PAGE_SIZE;
}
i915_gem_chipset_flush(obj->base.dev);
}

#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
drm_pci_free(obj->base.dev, phys);
obj->phys_handle = NULL;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
drm_dma_handle_t *phys;
struct address_space *mapping;
char *vaddr;
int i;

if (obj->phys_handle) {
if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
return -EBUSY;

return 0;
}

if (obj->madv != I915_MADV_WILLNEED)
return -EFAULT;

if (obj->base.filp == NULL)
return -EINVAL;

/* create a new object */
phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
if (!phys)
return -ENOMEM;

vaddr = phys->vaddr;
#ifdef CONFIG_X86
set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
#endif
mapping = file_inode(obj->base.filp)->i_mapping;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;

page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page)) {
#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
#endif
drm_pci_free(obj->base.dev, phys);
return PTR_ERR(page);
}

src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
kunmap_atomic(src);

mark_page_accessed(page);
page_cache_release(page);

vaddr += PAGE_SIZE;
}

obj->phys_handle = phys;
return 0;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);

if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;

/* The physical object once assigned is fixed for the lifetime
* of the obj, so we can safely drop the lock and continue
* to access vaddr.
*/
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
if (unwritten)
return -EFAULT;
}

i915_gem_chipset_flush(dev);
return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (obj->phys_obj) {
ret = i915_gem_phys_pwrite(dev, obj, args, file);
if (obj->phys_handle) {
ret = i915_gem_phys_pwrite(obj, args, file);
goto out;
}

@ -3208,12 +3326,14 @@ static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
unsigned alignment,
unsigned flags)
uint64_t flags)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
size_t gtt_max =
unsigned long start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
unsigned long end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size > gtt_max) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
if (obj->base.size > end) {
DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
obj->base.size,
flags & PIN_MAPPABLE ? "mappable" : "total",
gtt_max);
end);
return ERR_PTR(-E2BIG);
}

@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
search_free:
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max,
obj->cache_level,
start, end,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level, flags);
obj->cache_level,
start, end,
flags);
if (ret == 0)
goto search_free;

@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return ret;
}

static bool
i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
{
struct drm_i915_gem_object *obj = vma->obj;

if (alignment &&
vma->node.start & (alignment - 1))
return true;

if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
return true;

if (flags & PIN_OFFSET_BIAS &&
vma->node.start < (flags & PIN_OFFSET_MASK))
return true;

return false;
}
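Editor's sketch, not commit text: a caller that wants a buffer kept at or above a 256 KiB offset would combine the new bits the way the execbuffer path does further down; obj and vm here are placeholders:

/* Pin obj so its GTT offset ends up >= 256 KiB; i915_vma_misplaced()
 * above is what forces a rebind if an existing binding sits lower. */
ret = i915_gem_object_pin(obj, vm, 4096 /* alignment */,
			  PIN_OFFSET_BIAS | (256 * 1024));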
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
uint32_t alignment,
unsigned flags)
uint64_t flags)
{
struct i915_vma *vma;
int ret;
@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;

if ((alignment &&
vma->node.start & (alignment - 1)) ||
(flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
if (i915_vma_misplaced(vma, alignment, flags)) {
WARN(vma->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
i915_gem_obj_offset(obj, vm), alignment,
flags & PIN_MAPPABLE,
!!(flags & PIN_MAPPABLE),
obj->map_and_fenceable);
ret = i915_vma_unbind(vma);
if (ret)
@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)

trace_i915_gem_object_destroy(obj);

if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);

list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
int ret;

@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
}
}

i915_gem_object_detach_phys(obj);

/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
* before progressing. */
if (obj->stolen)
@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev)
register_shrinker(&dev_priv->mm.inactive_shrinker);
}

/*
* Create a physically contiguous memory object for this object
* e.g. for cursor + overlay regs
*/
static int i915_gem_init_phys_object(struct drm_device *dev,
int id, int size, int align)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;
int ret;

if (dev_priv->mm.phys_objs[id - 1] || !size)
return 0;

phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
if (!phys_obj)
return -ENOMEM;

phys_obj->id = id;

phys_obj->handle = drm_pci_alloc(dev, size, align);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
}
#ifdef CONFIG_X86
set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif

dev_priv->mm.phys_objs[id - 1] = phys_obj;

return 0;
kfree_obj:
kfree(phys_obj);
return ret;
}

static void i915_gem_free_phys_object(struct drm_device *dev, int id)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_phys_object *phys_obj;

if (!dev_priv->mm.phys_objs[id - 1])
return;

phys_obj = dev_priv->mm.phys_objs[id - 1];
if (phys_obj->cur_obj) {
i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
}

#ifdef CONFIG_X86
set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
#endif
drm_pci_free(dev, phys_obj->handle);
kfree(phys_obj);
dev_priv->mm.phys_objs[id - 1] = NULL;
}

void i915_gem_free_all_phys_object(struct drm_device *dev)
{
int i;

for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
i915_gem_free_phys_object(dev, i);
}

void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
char *vaddr;
int i;
int page_count;

if (!obj->phys_obj)
return;
vaddr = obj->phys_obj->handle->vaddr;

page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
kunmap_atomic(dst);

drm_clflush_pages(&page, 1);

set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
}
}
i915_gem_chipset_flush(dev);

obj->phys_obj->cur_obj = NULL;
obj->phys_obj = NULL;
}

int
i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
int align)
{
struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
int i;

if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;

if (obj->phys_obj) {
if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}

/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
obj->base.size, align);
if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n",
id, obj->base.size);
return ret;
}
}

/* bind to the object */
obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj->phys_obj->cur_obj = obj;

page_count = obj->base.size / PAGE_SIZE;

for (i = 0; i < page_count; i++) {
struct page *page;
char *dst, *src;

page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);

src = kmap_atomic(page);
dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);

mark_page_accessed(page);
page_cache_release(page);
}

return 0;
}

static int
i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = to_user_ptr(args->data_ptr);

if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;

/* The physical object once assigned is fixed for the lifetime
* of the obj, so we can safely drop the lock and continue
* to access vaddr.
*/
mutex_unlock(&dev->struct_mutex);
unwritten = copy_from_user(vaddr, user_data, args->size);
mutex_lock(&dev->struct_mutex);
if (unwritten)
return -EFAULT;
}

i915_gem_chipset_flush(dev);
return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
int min_size, unsigned alignment, unsigned cache_level,
unsigned long start, unsigned long end,
unsigned flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
struct i915_vma *vma;
int ret = 0;
@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
*/

INIT_LIST_HEAD(&unwind_list);
if (flags & PIN_MAPPABLE) {
BUG_ON(!i915_is_ggtt(vm));
if (start != 0 || end != vm->total) {
drm_mm_init_scan_with_range(&vm->mm, min_size,
alignment, cache_level, 0,
dev_priv->gtt.mappable_end);
alignment, cache_level,
start, end);
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
@ -35,6 +35,9 @@

#define __EXEC_OBJECT_HAS_PIN (1<<31)
#define __EXEC_OBJECT_HAS_FENCE (1<<30)
#define __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)

struct eb_vmas {
struct list_head vmas;
@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
bool need_fence;
unsigned flags;
uint64_t flags;
int ret;

flags = 0;
@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,

if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
flags |= PIN_GLOBAL;
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
if (ret)
@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
return 0;
}

static bool
eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
{
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
bool need_fence, need_mappable;

need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);

WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vma->vm));

if (entry->alignment &&
vma->node.start & (entry->alignment - 1))
return true;

if (need_mappable && !obj->map_and_fenceable)
return true;

if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
vma->node.start < BATCH_OFFSET_BIAS)
return true;

return false;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *vmas,
@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,

/* Unbind any ill-fitting objects or pin. */
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
bool need_fence, need_mappable;

obj = vma->obj;

if (!drm_mm_node_allocated(&vma->node))
continue;

need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(vma);

WARN_ON((need_mappable || need_fence) &&
!i915_is_ggtt(vma->vm));

if ((entry->alignment &&
vma->node.start & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
if (eb_vma_misplaced(vma, has_fenced_gpu_access))
ret = i915_vma_unbind(vma);
else
ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
* relocations were valid.
*/
for (j = 0; j < exec[i].relocation_count; j++) {
if (copy_to_user(&user_relocs[j].presumed_offset,
&invalid_offset,
sizeof(invalid_offset))) {
if (__copy_to_user(&user_relocs[j].presumed_offset,
&invalid_offset,
sizeof(invalid_offset))) {
ret = -EFAULT;
mutex_lock(&dev->struct_mutex);
goto err;
@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
return 0;
}

static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
* to negative relocation deltas. Usually that works out ok since the
* relocate address is still positive, except when the batch is placed
* very low in the GTT. Ensure this doesn't happen.
*
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

return vma->obj;
}
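A worked illustration of the failure mode described in the comment above (editor's example; the values are invented):

/* A compressed batch bound at a low GTT offset plus a negative
 * relocation delta wraps around in 32-bit arithmetic: */
uint32_t batch_start = 0x10000;             /* 64 KiB: below the bias  */
int32_t  delta       = -0x18000;            /* as SNA might emit       */
uint32_t reloc_addr  = batch_start + delta; /* 0xffff8000: garbage     */
/* With __EXEC_OBJECT_NEEDS_BIAS the batch lands at or above
 * BATCH_OFFSET_BIAS (256 KiB), keeping such sums positive. */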
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;

/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
batch_obj = eb_get_batch(eb);

/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,

ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
to_user_ptr(args->buffers_ptr);

/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
exec_list[i].offset = exec2_list[i].offset;
/* ... and back out to userspace */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec_list,
sizeof(*exec_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
for (i = 0; i < args->buffer_count; i++) {
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
break;
}
}
}

@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
struct drm_i915_gem_exec_object2 *user_exec_list =
to_user_ptr(args->buffers_ptr);
int i;

for (i = 0; i < args->buffer_count; i++) {
ret = __copy_to_user(&user_exec_list[i].offset,
&exec2_list[i].offset,
sizeof(user_exec_list[i].offset));
if (ret) {
ret = -EFAULT;
DRM_DEBUG("failed to copy %d exec entries "
"back to user\n",
args->buffer_count);
break;
}
}
}
@ -1089,7 +1089,9 @@ alloc:
if (ret == -ENOSPC && !retried) {
ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE, 0);
I915_CACHE_NONE,
0, dev_priv->gtt.base.total,
0);
if (ret)
return ret;
@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
ret = i915_gem_object_attach_phys(obj, align);
if (ret) {
DRM_DEBUG_KMS("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
addr = obj->phys_handle->busaddr;
}

if (IS_GEN2(dev))
@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,

finish:
if (intel_crtc->cursor_bo) {
if (INTEL_INFO(dev)->cursor_needs_physical) {
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
if (!INTEL_INFO(dev)->cursor_needs_physical)
i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
struct overlay_registers __iomem *regs;

if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->reg_bo = reg_bo;

if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
PAGE_SIZE);
ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
overlay->flip_addr = reg_bo->phys_handle->busaddr;
} else {
ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
/* Cast to make sparse happy, but it's wc memory anyway, so
* equivalent to the wc io mapping on X86. */
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_obj->handle->vaddr;
overlay->reg_bo->phys_handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
error->dovsta = I915_READ(DOVSTA);
error->isr = I915_READ(ISR);
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
else
error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
uint32_t domain = r->write_domain ?
r->write_domain : r->read_domains;

if (domain & RADEON_GEM_DOMAIN_CPU) {
DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
"for command submission\n");
return -EINVAL;
}

p->relocs[i].domain = domain;
if (domain == RADEON_GEM_DOMAIN_VRAM)
domain |= RADEON_GEM_DOMAIN_GTT;
@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;

/* we only support VM on some SI+ rings */
if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
}
} else {
if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
DRM_ERROR("VM not supported on ring %d!\n",
p->ring);
return -EINVAL;
}
}
}
@ -1533,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)

radeon_restore_bios_scratch_regs(rdev);

if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}

/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
@ -1562,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
}

drm_kms_helper_poll_enable(dev);

if (fbcon) {
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
}

return 0;
}
@ -862,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
ref_div_max = min(128 / post_div, ref_div_max);
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);

/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
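Editor's note on the arithmetic: with integer division, a large post divider can now drive the limit to zero, which is exactly what the added max(..., 1u) guards against. A worked example:

/* post_div = 128: the old limit was min(128 / 128, ref_div_max) = 1,
 * but the new 100 / 128 truncates to 0; without the outer max()
 * *ref_div above would be clamped to an invalid 0. */
ref_div_max = max(min(100 / 128, ref_div_max), 1u);   /* -> 1 */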
@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct list_head *head)
{
struct radeon_cs_reloc *list;
unsigned i, idx, size;
unsigned i, idx;

size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc);
list = kmalloc(size, GFP_KERNEL);
list = kmalloc_array(vm->max_pde_used + 1,
sizeof(struct radeon_cs_reloc), GFP_KERNEL);
if (!list)
return NULL;
@ -1053,7 +1053,7 @@ config SENSORS_PC87427

config SENSORS_NTC_THERMISTOR
tristate "NTC thermistor support"
depends on (!OF && !IIO) || (OF && IIO)
depends on !OF || IIO=n || IIO
help
This driver supports NTC thermistors sensor reading and its
interpretation. The driver can also monitor the temperature and
@ -44,6 +44,7 @@ struct ntc_compensation {
unsigned int ohm;
};

/* Order matters, ntc_match references the entries by index */
static const struct platform_device_id ntc_thermistor_id[] = {
{ "ncp15wb473", TYPE_NCPXXWB473 },
{ "ncp18wb473", TYPE_NCPXXWB473 },
@ -141,7 +142,7 @@ struct ntc_data {
char name[PLATFORM_NAME_SIZE];
};

#ifdef CONFIG_OF
#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)

static const struct of_device_id ntc_match[] = {
{ .compatible = "ntc,ncp15wb473",
.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
.data = &ntc_thermistor_id[0] },
{ .compatible = "ntc,ncp18wb473",
.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
.data = &ntc_thermistor_id[1] },
{ .compatible = "ntc,ncp21wb473",
.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
.data = &ntc_thermistor_id[2] },
{ .compatible = "ntc,ncp03wb473",
.data = &ntc_thermistor_id[TYPE_NCPXXWB473] },
.data = &ntc_thermistor_id[3] },
{ .compatible = "ntc,ncp15wl333",
.data = &ntc_thermistor_id[TYPE_NCPXXWL333] },
.data = &ntc_thermistor_id[4] },
{ },
};
MODULE_DEVICE_TABLE(of, ntc_match);
@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
return NULL;
}

#define ntc_match NULL

static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata)
{ }
#endif
|
||||
default y
|
||||
select SERIO
|
||||
select SERIO_LIBPS2
|
||||
select SERIO_I8042 if X86
|
||||
select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
|
||||
select SERIO_GSCPS2 if GSC
|
||||
help
|
||||
Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
|
||||
|
@ -111,6 +111,8 @@ struct pxa27x_keypad {
unsigned short keycodes[MAX_KEYPAD_KEYS];
int rotary_rel_code[2];

unsigned int row_shift;

/* state row bits of each column scan */
uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
uint32_t direct_key_state;
@ -467,7 +469,8 @@ scan:
if ((bits_changed & (1 << row)) == 0)
continue;

code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT);
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);

input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
new_state[col] & (1 << row));
@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
goto failed_put_clk;
}

keypad->row_shift = get_count_order(pdata->matrix_key_cols);

if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
(pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
input_dev->evbit[0] |= BIT_MASK(EV_REL);
@ -17,7 +17,7 @@ config MOUSE_PS2
default y
select SERIO
select SERIO_LIBPS2
select SERIO_I8042 if X86
select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO
select SERIO_GSCPS2 if GSC
help
Say Y here if you have a PS/2 mouse connected to your system. This
@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse)
}

#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
struct min_max_quirk {
const char * const *pnp_ids;
int x_min, x_max, y_min, y_max;
};

static const struct min_max_quirk min_max_pnpid_table[] = {
{
(const char * const []){"LEN0033", NULL},
1024, 5052, 2258, 4832
},
{
(const char * const []){"LEN0035", "LEN0042", NULL},
1232, 5710, 1156, 4696
},
{
(const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
(const char * const []){"LEN2001", NULL},
1024, 5022, 2508, 4832
},
{ }
};

/* This list has been kindly provided by Synaptics. */
static const char * const topbuttonpad_pnp_ids[] = {
"LEN0017",
@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN002D",
"LEN002E",
"LEN0033", /* Helix */
"LEN0034", /* T431s, T540, X1 Carbon 2nd */
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
"LEN0037",
@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0048",
"LEN0049",
"LEN2000",
"LEN2001",
"LEN2001", /* Edge E431 */
"LEN2002",
"LEN2003",
"LEN2004", /* L440 */
@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = {
NULL
};

static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[])
{
int i;

if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4))
for (i = 0; ids[i]; i++)
if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i]))
return true;

return false;
}
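A hedged usage sketch (editor's addition, not commit text): for PS/2 devices the serio firmware_id is roughly of the form "PNP: LEN0034 PNP0f13", so the helper above can be driven with any NULL-terminated id list; psmouse_info is assumed available as the driver's usual logging wrapper:

/* Hypothetical caller: detect one specific Lenovo touchpad. */
static const char * const helix_ids[] = { "LEN0033", NULL };

if (matches_pnp_id(psmouse, helix_ids))
	psmouse_info(psmouse, "Helix-style touchpad detected\n");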
/*****************************************************************************
* Synaptics communications functions
****************************************************************************/
@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse)
* Resolution is left zero if touchpad does not support the query
*/

static const int *quirk_min_max;

static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
int i;

if (quirk_min_max) {
priv->x_min = quirk_min_max[0];
priv->x_max = quirk_min_max[1];
priv->y_min = quirk_min_max[2];
priv->y_max = quirk_min_max[3];
return 0;
}
for (i = 0; min_max_pnpid_table[i].pnp_ids; i++)
if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) {
priv->x_min = min_max_pnpid_table[i].x_min;
priv->x_max = min_max_pnpid_table[i].x_max;
priv->y_min = min_max_pnpid_table[i].y_min;
priv->y_max = min_max_pnpid_table[i].y_max;
return 0;
}

if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse,

if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) {
__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
/* See if this buttonpad has a top button area */
if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) {
for (i = 0; topbuttonpad_pnp_ids[i]; i++) {
if (strstr(psmouse->ps2dev.serio->firmware_id,
topbuttonpad_pnp_ids[i])) {
__set_bit(INPUT_PROP_TOPBUTTONPAD,
dev->propbit);
break;
}
}
}
if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids))
__set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit);
/* Clickpads report only left button */
__clear_bit(BTN_RIGHT, dev->keybit);
__clear_bit(BTN_MIDDLE, dev->keybit);
@ -1547,104 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};

static const struct dmi_system_id min_max_dmi_table[] __initconst = {
#if defined(CONFIG_DMI)
{
/* Lenovo ThinkPad Helix */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
},
.driver_data = (int []){1024, 5052, 2258, 4832},
},
{
/* Lenovo ThinkPad X240 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
/* Lenovo ThinkPad Edge E431 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Edge E431"),
},
.driver_data = (int []){1024, 5022, 2508, 4832},
},
{
/* Lenovo ThinkPad T431s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad T440s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad L440 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad T540p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
},
.driver_data = (int []){1024, 5056, 2058, 4832},
},
{
/* Lenovo ThinkPad L540 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo Yoga S1 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
"ThinkPad S1 Yoga"),
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
/* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION,
"ThinkPad X1 Carbon 2nd"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
#endif
{ }
};

void __init synaptics_module_init(void)
{
const struct dmi_system_id *min_max_dmi;

impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);

min_max_dmi = dmi_first_match(min_max_dmi_table);
if (min_max_dmi)
quirk_min_max = min_max_dmi->driver_data;
}

static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io)
writeb(divisor, KMICLKDIV);
writeb(KMICR_EN, KMICR);

ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi);
ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050",
kmi);
if (ret) {
printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq);
writeb(0, KMICR);
@ -640,7 +640,7 @@ config TOUCHSCREEN_WM9713

config TOUCHSCREEN_WM97XX_ATMEL
tristate "WM97xx Atmel accelerated touch"
depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91)
depends on TOUCHSCREEN_WM97XX && AVR32
help
Say Y here for support for streaming mode with WM97xx touchscreens
on Atmel AT91 or AVR32 systems with an AC97C module.
@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->num_discard_bios = 1;
ti->discards_supported = true;
ti->discard_zeroes_data_unsupported = true;
/* Discard bios must be split on a block boundary */
ti->split_discard_bios = true;

cache->features = ca->features;
ti->per_bio_data_size = get_per_bio_data_size(cache);
@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
else
m->saved_queue_if_no_path = queue_if_no_path;
m->queue_if_no_path = queue_if_no_path;
if (!m->queue_if_no_path)
dm_table_run_md_queue_async(m->ti->table);

spin_unlock_irqrestore(&m->lock, flags);

if (!queue_if_no_path)
dm_table_run_md_queue_async(m->ti->table);

return 0;
}

@ -954,7 +954,7 @@ out:
*/
static int reinstate_path(struct pgpath *pgpath)
{
int r = 0;
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;

@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath)

if (!m->nr_valid_paths++) {
m->current_pgpath = NULL;
dm_table_run_md_queue_async(m->ti->table);
run_queue = 1;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
m->pg_init_in_progress++;
@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath)

out:
spin_unlock_irqrestore(&m->lock, flags);
if (run_queue)
dm_table_run_md_queue_async(m->ti->table);

return r;
}
@ -1566,8 +1568,8 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
}
if (m->pg_init_required)
__pg_init_all_paths(m);
dm_table_run_md_queue_async(m->ti->table);
spin_unlock_irqrestore(&m->lock, flags);
dm_table_run_md_queue_async(m->ti->table);
}

return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
Some files were not shown because too many files have changed in this diff.