Merge branch 'topic/snd-device' into for-next

commit 3fe9cf390f
Author: Takashi Iwai
Date:   2015-02-03 17:57:16 +01:00

238 changed files with 2633 additions and 1600 deletions

View File

@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
 	compatible = "st,comms-ssc4-i2c";
 	reg = <0xfed40000 0x110>;
 	interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
-	clocks = <&CLK_S_ICN_REG_0>;
+	clocks = <&clk_s_a0_ls CLK_ICN_REG>;
 	clock-names = "ssc";
 	clock-frequency = <400000>;
 	pinctrl-names = "default";

View File

@@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
 dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
 dallas,ds75 Digital Thermometer and Thermostat
 dlg,da9053 DA9053: flexible system level PMIC with multicore support
+dlg,da9063 DA9063: system PMIC for quad-core application processors
 epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
 epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
 fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer

View File

@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
 F: drivers/staging/iio/*/ad*
 F: staging/iio/trigger/iio-trig-bfin-timer.c
 
+ANDROID DRIVERS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Arve Hjønnevåg <arve@android.com>
+M: Riley Andrews <riandrews@android.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L: devel@driverdev.osuosl.org
+S: Supported
+F: drivers/android/
+F: drivers/staging/android/
+
 AOA (Apple Onboard Audio) ALSA DRIVER
 M: Johannes Berg <johannes@sipsolutions.net>
 L: linuxppc-dev@lists.ozlabs.org
@@ -10166,6 +10176,7 @@ USERSPACE I/O (UIO)
 M: "Hans J. Koch" <hjk@hansjkoch.de>
 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
 F: Documentation/DocBook/uio-howto.tmpl
 F: drivers/uio/
 F: include/linux/uio*.h

View File

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Diseased Newt
 
 # *DOCUMENTATION*

View File

@@ -156,6 +156,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();
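
Editorial note: the same two-line addition recurs in the page-fault handler of nearly every architecture in this merge. handle_mm_fault() can now return VM_FAULT_SIGSEGV, and each handler has to route that bit to its existing bad_area/SIGSEGV path instead of falling through to the SIGBUS case or BUG(). A minimal stand-alone C sketch of that dispatch order follows; the flag values, function name and labels are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the kernel's VM_FAULT_* bits. */
#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_SIGSEGV 0x0004

/* Mirrors the per-arch dispatch: OOM first, then the new SIGSEGV
 * case, then SIGBUS; anything else is treated as a bug. */
static const char *dispatch_fault(unsigned int fault)
{
	if (fault & VM_FAULT_OOM)
		return "out_of_memory";
	else if (fault & VM_FAULT_SIGSEGV)	/* new in this series */
		return "bad_area";
	else if (fault & VM_FAULT_SIGBUS)
		return "do_sigbus";
	return "BUG";
}

int main(void)
{
	printf("%s\n", dispatch_fault(VM_FAULT_SIGSEGV));	/* prints "bad_area" */
	return 0;
}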

View File

@@ -161,6 +161,8 @@ good_area:
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto do_sigbus;

View File

@@ -166,12 +166,12 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
-			ethphy1: ethernet-phy@0 {
-				reg = <0>;
+			ethphy1: ethernet-phy@1 {
+				reg = <1>;
 			};
-			ethphy2: ethernet-phy@1 {
-				reg = <1>;
+			ethphy2: ethernet-phy@2 {
+				reg = <2>;
 			};
 		};
 	};

View File

@@ -17,14 +17,6 @@
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {
@@ -39,6 +31,14 @@
 				 <&ahb_gates 44>;
 			status = "disabled";
 		};
+
+		framebuffer@1 {
+			compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+			allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+			clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+				 <&ahb_gates 44>, <&ahb_gates 46>;
+			status = "disabled";
+		};
 	};
 
 	cpus {
@@ -438,8 +438,8 @@
 			reg-names = "phy_ctrl", "pmu1", "pmu2";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>, <&usb_clk 2>;
-			reset-names = "usb1_reset", "usb2_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+			reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
 			status = "disabled";
 		};

View File

@@ -55,6 +55,12 @@
 	model = "Olimex A10s-Olinuxino Micro";
 	compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";

View File

@@ -18,10 +18,6 @@
 	aliases {
 		ethernet0 = &emac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
 	};
 
 	chosen {
@@ -390,8 +386,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};

View File

@@ -53,6 +53,10 @@
 	model = "HSG H702";
 	compatible = "hsg,h702", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";

View File

@@ -54,6 +54,10 @@
 	model = "Olimex A13-Olinuxino Micro";
 	compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";

View File

@@ -55,6 +55,10 @@
 	model = "Olimex A13-Olinuxino";
 	compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
 
+	aliases {
+		serial0 = &uart1;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";

View File

@@ -16,11 +16,6 @@
 / {
 	interrupt-parent = <&intc>;
 
-	aliases {
-		serial0 = &uart1;
-		serial1 = &uart3;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;
@@ -349,8 +344,8 @@
 			reg-names = "phy_ctrl", "pmu1";
 			clocks = <&usb_clk 8>;
 			clock-names = "usb_phy";
-			resets = <&usb_clk 1>;
-			reset-names = "usb1_reset";
+			resets = <&usb_clk 0>, <&usb_clk 1>;
+			reset-names = "usb0_reset", "usb1_reset";
 			status = "disabled";
 		};

View File

@@ -53,12 +53,6 @@
 	interrupt-parent = <&gic>;
 
 	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
 		ethernet0 = &gmac;
 	};

View File

@@ -55,6 +55,12 @@
 	model = "LeMaker Banana Pi";
 	compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart3;
+		serial2 = &uart7;
+	};
+
 	soc@01c00000 {
 		spi0: spi@01c05000 {
 			pinctrl-names = "default";

View File

@@ -19,6 +19,14 @@
 	model = "Merrii A20 Hummingbird";
 	compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart2;
+		serial2 = &uart3;
+		serial3 = &uart4;
+		serial4 = &uart5;
+	};
+
 	soc@01c00000 {
 		mmc0: mmc@01c0f000 {
 			pinctrl-names = "default";

View File

@@ -20,6 +20,9 @@
 	compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
 
 	aliases {
+		serial0 = &uart0;
+		serial1 = &uart6;
+		serial2 = &uart7;
 		spi0 = &spi1;
 		spi1 = &spi2;
 	};

View File

@@ -54,14 +54,6 @@
 	aliases {
 		ethernet0 = &gmac;
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &uart6;
-		serial7 = &uart7;
 	};
 
 	chosen {

View File

@@ -55,6 +55,10 @@
 	model = "Ippo Q8H Dual Core Tablet (v5)";
 	compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
 
+	aliases {
+		serial0 = &r_uart;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};

View File

@@ -52,15 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;

View File

@@ -54,6 +54,11 @@
 	model = "Merrii A80 Optimus Board";
 	compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
 
+	aliases {
+		serial0 = &uart0;
+		serial1 = &uart4;
+	};
+
 	chosen {
 		bootargs = "earlyprintk console=ttyS0,115200";
 	};

View File

@@ -52,16 +52,6 @@
 / {
 	interrupt-parent = <&gic>;
 
-	aliases {
-		serial0 = &uart0;
-		serial1 = &uart1;
-		serial2 = &uart2;
-		serial3 = &uart3;
-		serial4 = &uart4;
-		serial5 = &uart5;
-		serial6 = &r_uart;
-	};
-
 	cpus {
 		#address-cells = <1>;
 		#size-cells = <0>;

View File

@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
 	return 1;

View File

@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest on this vcpu */
 	bool pause;

View File

@@ -44,6 +44,7 @@
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
-
 	/*
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
 	 *
 	 * VIVT caches are tagged using both the ASID and the VMID and doesn't
 	 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+	 *
+	 * We need to do this through a kernel mapping (using the
+	 * user-space mapping has proved to be the wrong
+	 * solution). For that, we need to kmap one page at a time,
+	 * and iterate over the range.
 	 */
-	if (icache_is_pipt()) {
-		__cpuc_coherent_user_range(hva, hva + size);
-	} else if (!icache_is_vivt_asid_tagged()) {
+
+	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+	VM_BUG_ON(size & PAGE_MASK);
+
+	if (!need_flush && !icache_is_pipt())
+		goto vipt_cache;
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		if (need_flush)
+			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		if (icache_is_pipt())
+			__cpuc_coherent_user_range((unsigned long)va,
+						   (unsigned long)va + PAGE_SIZE);
+
+		size -= PAGE_SIZE;
+		pfn++;
+
+		kunmap_atomic(va);
+	}
+
+vipt_cache:
+	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	void *va = kmap_atomic(pte_page(pte));
+
+	kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+	kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	unsigned long size = PMD_SIZE;
+	pfn_t pfn = pmd_pfn(pmd);
+
+	while (size) {
+		void *va = kmap_atomic_pfn(pfn);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+		pfn++;
+		size -= PAGE_SIZE;
+
+		kunmap_atomic(va);
+	}
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)	virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif	/* !__ASSEMBLY__ */

View File

@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->cpu = cpu;
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-	/*
-	 * Check whether this vcpu requires the cache to be flushed on
-	 * this physical CPU. This is a consequence of doing dcache
-	 * operations by set/way on this vcpu. We do it here to be in
-	 * a non-preemptible section.
-	 */
-	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
 	kvm_arm_set_running_vcpu(vcpu);
 }
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
 		vcpu->mode = OUTSIDE_GUEST_MODE;
-		vcpu->arch.last_pcpu = smp_processor_id();
 		kvm_guest_exit();
 		trace_kvm_exit(*vcpu_pc(vcpu));
 		/*

View File

@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct coproc_params *p,
 			const struct coproc_reg *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt1);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-		break;
-
-	case 10:		/* DCCSW */
-		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
+bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct coproc_params *p,
 			  const struct coproc_reg *r)
 {
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
 	BUG_ON(!p->is_write);
 
 	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
 		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-	return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-		  const struct coproc_params *p,
-		  const struct coproc_reg *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }

View File

@@ -153,7 +153,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64		.is_64 = true
 #define is32		.is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
+bool access_vm_reg(struct kvm_vcpu *vcpu,
 		  const struct coproc_params *p,
 		  const struct coproc_reg *r);

View File

@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {

View File

@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
 	/* SCTLR: swapped by interrupt.S. */
 	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-			access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+			access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {

View File

@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+	__kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	__kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+	__kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
 				  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 	put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 		       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 	start_pte = pte = pte_offset_kernel(pmd, addr);
 	do {
 		if (!pte_none(*pte)) {
+			pte_t old_pte = *pte;
+
 			kvm_set_pte(pte, __pte(0));
-			put_page(virt_to_page(pte));
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+			/* No need to invalidate the cache for device mappings */
+			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+				kvm_flush_dcache_pte(old_pte);
+
+			put_page(virt_to_page(pte));
 		}
 	} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
 			if (kvm_pmd_huge(*pmd)) {
+				pmd_t old_pmd = *pmd;
+
 				pmd_clear(pmd);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pmd(old_pmd);
+
 				put_page(virt_to_page(pmd));
 			} else {
 				unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
 			if (pud_huge(*pud)) {
+				pud_t old_pud = *pud;
+
 				pud_clear(pud);
 				kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+				kvm_flush_dcache_pud(old_pud);
+
 				put_page(virt_to_page(pud));
 			} else {
 				unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte)) {
-			hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-			kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-		}
+		if (!pte_none(*pte) &&
+		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -226,13 +282,11 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
 	do {
 		next = kvm_pmd_addr_end(addr, end);
 		if (!pmd_none(*pmd)) {
-			if (kvm_pmd_huge(*pmd)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-			} else {
+			if (kvm_pmd_huge(*pmd))
+				kvm_flush_dcache_pmd(*pmd);
+			else
 				stage2_flush_ptes(kvm, pmd, addr, next);
-			}
 		}
 	} while (pmd++, addr = next, addr != end);
 }
@@ -246,13 +300,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
 	do {
 		next = kvm_pud_addr_end(addr, end);
 		if (!pud_none(*pud)) {
-			if (pud_huge(*pud)) {
-				hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-				kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-			} else {
+			if (pud_huge(*pud))
+				kvm_flush_dcache_pud(*pud);
+			else
 				stage2_flush_pmds(kvm, pud, addr, next);
-			}
 		}
 	} while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 	return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+				      unsigned long size, bool uncached)
+{
+	__coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pmd_writable(&new_pmd);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
 	} else {
 		pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			kvm_set_s2pte_writable(&new_pte);
 			kvm_set_pfn_dirty(pfn);
 		}
-		coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-					  fault_ipa_uncached);
+		coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
 		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
 			pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
 	}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 	unmap_stage2_range(kvm, gpa, size);
 	spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+	unsigned long hcr = vcpu_get_hcr(vcpu);
+
+	/*
+	 * If this is the first time we do a S/W operation
+	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
+	 * VM trapping.
+	 *
+	 * Otherwise, rely on the VM trapping to wait for the MMU +
+	 * Caches to be turned off. At that point, we'll be able to
+	 * clean the caches again.
+	 */
+	if (!(hcr & HCR_TVM)) {
+		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+					vcpu_has_cache_enabled(vcpu));
+		stage2_flush_vm(vcpu->kvm);
+		vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+	}
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+	bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+	/*
+	 * If switching the MMU+caches on, need to invalidate the caches.
+	 * If switching it off, need to clean the caches.
+	 * Clean + invalidate does the trick always.
+	 */
+	if (now_enabled != was_enabled)
+		stage2_flush_vm(vcpu->kvm);
+
+	/* Caches are now on, stop trapping VM ops (until a S/W op) */
+	if (now_enabled)
+		vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}

View File

@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+	TP_PROTO(unsigned long vcpu_pc, bool cache),
+	TP_ARGS(vcpu_pc, cache),
+
+	TP_STRUCT__entry(
+		__field( unsigned long,	vcpu_pc )
+		__field( bool,		cache )
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc = vcpu_pc;
+		__entry->cache = cache;
+	),
+
+	TP_printk("S/W flush at 0x%016lx (cache %s)",
+		  __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+	TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+	TP_ARGS(vcpu_pc, was, now),
+
+	TP_STRUCT__entry(
+		__field( unsigned long,	vcpu_pc )
+		__field( bool,		was )
+		__field( bool,		now )
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc = vcpu_pc;
+		__entry->was = was;
+		__entry->now = now;
+	),
+
+	TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+		  __entry->vcpu_pc, __entry->was ? "on" : "off",
+		  __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH

View File

@@ -189,6 +189,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
 	coherency_cpu_base = of_iomap(np, 0);
 	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
 
+	/*
+	 * We should switch the PL310 to I/O coherency mode only if
+	 * I/O coherency is actually enabled.
+	 */
+	if (!coherency_available())
+		return;
+
 	/*
 	 * Add the PL310 property "arm,io-coherent". This makes sure the
 	 * outer sync operation is not used, which allows to

View File

@@ -18,6 +18,8 @@
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/mfd/tmio.h>
 #include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
 			      sizeof(ape6evm_leds_pdata));
 }
 
+static void __init ape6evm_legacy_init_time(void)
+{
+	/* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char *ape6evm_boards_compat_dt[] __initdata = {
 	"renesas,ape6evm",
 	NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
 DT_MACHINE_START(APE6EVM_DT, "ape6evm")
 	.init_early	= shmobile_init_delay,
+	.init_irq	= ape6evm_legacy_init_irq,
 	.init_machine	= ape6evm_add_standard_devices,
 	.init_late	= shmobile_init_late,
 	.dt_compat	= ape6evm_boards_compat_dt,
+	.init_time	= ape6evm_legacy_init_time,
 MACHINE_END

View File

@@ -21,6 +21,8 @@
 #include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
 #include <linux/kernel.h>
 #include <linux/leds.h>
 #include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
 					  lager_ksz8041_fixup);
 }
 
+static void __init lager_legacy_init_irq(void)
+{
+	void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+	void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+	/* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
 static const char * const lager_boards_compat_dt[] __initconst = {
 	"renesas,lager",
 	NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
 DT_MACHINE_START(LAGER_DT, "lager")
 	.smp		= smp_ops(r8a7790_smp_ops),
 	.init_early	= shmobile_init_delay,
+	.init_irq	= lager_legacy_init_irq,
 	.init_time	= rcar_gen2_timer_init,
 	.init_machine	= lager_init,
 	.init_late	= shmobile_init_late,

View File

@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
 #ifdef CONFIG_COMMON_CLK
 	rcar_gen2_clocks_init(mode);
 #endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
 	clocksource_of_init();
+#endif
 }
 
 struct memory_reserve_config {

View File

@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
 	if (!max_freq)
 		return;
 
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+	/* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+	 * to GIC being initialized from C and arch timer via DT */
+	if (of_machine_is_compatible("renesas,r8a73a4"))
+		has_arch_timer = false;
+
+	/* Non-multiplatform r8a7790 SoC cannot use arch timer due
+	 * to GIC being initialized from C and arch timer via DT */
+	if (of_machine_is_compatible("renesas,r8a7790"))
+		has_arch_timer = false;
+#endif
+
 	if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
 		if (is_a7_a8_a9)
 			shmobile_setup_delay_hz(max_freq, 1, 3);

View File

@@ -1940,17 +1940,7 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 }
 EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
 
-/**
- * arm_iommu_attach_device
- * @dev: valid struct device pointer
- * @mapping: io address space mapping structure (returned from
- *	arm_iommu_create_mapping)
- *
- * Attaches specified io address space mapping to the provided device,
- * More than one client might be attached to the same io address space
- * mapping.
- */
-int arm_iommu_attach_device(struct device *dev,
+static int __arm_iommu_attach_device(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
 	int err;
@@ -1965,15 +1955,35 @@ int arm_iommu_attach_device(struct device *dev,
 	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
 	return 0;
 }
-EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
 /**
- * arm_iommu_detach_device
+ * arm_iommu_attach_device
  * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
  *
- * Detaches the provided device from a previously attached map.
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
+ * More than one client might be attached to the same io address space
+ * mapping.
  */
-void arm_iommu_detach_device(struct device *dev)
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+
+	err = __arm_iommu_attach_device(dev, mapping);
+	if (err)
+		return err;
+
+	set_dma_ops(dev, &iommu_ops);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
+
+static void __arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
 
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+	__arm_iommu_detach_device(dev);
+	set_dma_ops(dev, NULL);
+}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		return false;
 	}
 
-	if (arm_iommu_attach_device(dev, mapping)) {
+	if (__arm_iommu_attach_device(dev, mapping)) {
 		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
 				dev_name(dev));
 		arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 
-	arm_iommu_detach_device(dev);
+	__arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }

View File

@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 		vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+	vcpu->arch.hcr_el2 = hcr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;

View File

@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
 	 * Anything that is not used directly from assembly code goes
 	 * here.
 	 */
-	/* dcache set/way operation pending */
-	int last_pcpu;
-	cpumask_t require_dcache_flush;
 
 	/* Don't run the guest */
 	bool pause;

View File

@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 	return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
 					       unsigned long size,
 					       bool ipa_uncached)
 {
+	void *va = page_address(pfn_to_page(pfn));
+
 	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc((void *)hva, size);
+		kvm_flush_dcache_to_poc(va, size);
 
 	if (!icache_is_aliasing()) {		/* PIPT */
-		flush_icache_range(hva, hva + size);
+		flush_icache_range((unsigned long)va,
+				   (unsigned long)va + size);
 	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
 		/* any kind of VIPT cache */
 		__flush_icache_all();
 	}
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+
+	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+	struct page *page = pmd_page(pmd);
+
+	kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+	struct page *page = pud_page(pud);
+
+	kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)	__virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */

View File

@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
 	return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-	asm volatile("dc cisw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-	asm volatile("dc csw, %x0" : : "r" (val));
-	dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
 			const struct sys_reg_params *p,
 			const struct sys_reg_desc *r)
 {
-	unsigned long val;
-	int cpu;
-
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p);
 
-	cpu = get_cpu();
-
-	cpumask_setall(&vcpu->arch.require_dcache_flush);
-	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-	/* If we were already preempted, take the long way around */
-	if (cpu != vcpu->arch.last_pcpu) {
-		flush_cache_all();
-		goto done;
-	}
-
-	val = *vcpu_reg(vcpu, p->Rt);
-
-	switch (p->CRm) {
-	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-	case 14:		/* DCCISW */
-		do_dc_cisw(val);
-		break;
-
-	case 10:		/* DCCSW */
-		do_dc_csw(val);
-		break;
-	}
-
-done:
-	put_cpu();
-
+	kvm_set_way_flush(vcpu);
 	return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_params *p,
 			  const struct sys_reg_desc *r)
 {
 	unsigned long val;
+	bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
 	BUG_ON(!p->is_write);
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
 	}
 
-	return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
-			 const struct sys_reg_desc *r)
-{
-	access_vm_reg(vcpu, p, r);
-
-	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
-		vcpu->arch.hcr_el2 &= ~HCR_TVM;
-		stage2_flush_vm(vcpu->kvm);
-	}
-
+	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
 }
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	  NULL, reset_mpidr, MPIDR_EL1 },
 	/* SCTLR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+	  access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 	/* CPACR_EL1 */
 	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
 	  NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },

View File

@@ -142,6 +142,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -176,6 +176,8 @@ retry:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear0,
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -172,6 +172,8 @@ retry:
 		 */
 		if (fault & VM_FAULT_OOM) {
 			goto out_of_memory;
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			goto bad_area;
 		} else if (fault & VM_FAULT_SIGBUS) {
 			signal = SIGBUS;
 			goto bad_area;

View File

@@ -200,6 +200,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -145,6 +145,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto map_err;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto bus_err;
 		BUG();

View File

@@ -141,6 +141,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -224,6 +224,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -158,6 +158,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -262,6 +262,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -135,6 +135,8 @@ survive:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -171,6 +171,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -256,6 +256,8 @@ good_area:
 	 */
 	if (fault & VM_FAULT_OOM)
 		goto out_of_memory;
+	else if (fault & VM_FAULT_SIGSEGV)
+		goto bad_area;
 	else if (fault & VM_FAULT_SIGBUS)
 		goto bad_area;
 	BUG();

View File

@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	if (*flt & VM_FAULT_OOM) {
 		ret = -ENOMEM;
 		goto out_unlock;
-	} else if (*flt & VM_FAULT_SIGBUS) {
+	} else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
 		ret = -EFAULT;
 		goto out_unlock;
 	}

View File

@@ -437,6 +437,8 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+		if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
 			goto bail;

View File

@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
 	 * all cpus at boot. Get these reg values of current cpu and use the
 	 * same accross all cpus.
 	 */
-	uint64_t lpcr_val = mfspr(SPRN_LPCR);
+	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
 	uint64_t hid0_val = mfspr(SPRN_HID0);
 	uint64_t hid1_val = mfspr(SPRN_HID1);
 	uint64_t hid4_val = mfspr(SPRN_HID4);

View File

@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
 	args.token = rtas_token("set-indicator");
 	if (args.token == RTAS_UNKNOWN_SERVICE)
 		return;
+	args.token = cpu_to_be32(args.token);
 	args.nargs = cpu_to_be32(3);
 	args.nret = cpu_to_be32(1);
 	args.rets = &args.args[3];

View File

@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
 				do_no_context(regs);
 			else
 				pagefault_out_of_memory();
+		} else if (fault & VM_FAULT_SIGSEGV) {
+			/* Kernel mode? Handle exceptions or die */
+			if (!user_mode(regs))
+				do_no_context(regs);
+			else
+				do_sigsegv(regs, SEGV_MAPERR);
 		} else if (fault & VM_FAULT_SIGBUS) {
 			/* Kernel mode? Handle exceptions or die */
 			if (!user_mode(regs))

View File

@@ -114,6 +114,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	} else {
 		if (fault & VM_FAULT_SIGBUS)
 			do_sigbus(regs, error_code, address);
+		else if (fault & VM_FAULT_SIGSEGV)
+			bad_area(regs, error_code, address);
 		else
 			BUG();
 	}

View File

@@ -249,6 +249,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -446,6 +446,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -442,6 +442,8 @@ good_area:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
 		else if (fault & VM_FAULT_SIGBUS)
 			goto do_sigbus;
 		BUG();

View File

@@ -80,6 +80,8 @@ good_area:
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
+			} else if (fault & VM_FAULT_SIGSEGV) {
+				goto out;
 			} else if (fault & VM_FAULT_SIGBUS) {
 				err = -EACCES;
 				goto out;

View File

@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
 suffix-$(CONFIG_KERNEL_LZ4) := lz4
 
 RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
-	     perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+	     $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
 quiet_cmd_mkpiggy = MKPIGGY $@
 cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )

View File

@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 55: /* 22nm Atom "Silvermont"                */
+	case 76: /* 14nm Atom "Airmont"                   */
 	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
 		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));

View File

@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
 	 * or use ldexp(count, -32).
 	 * Watts = Joules/Time delta
 	 */
-	return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+	return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
 }
 
 static u64 rapl_event_update(struct perf_event *event)
static u64 rapl_event_update(struct perf_event *event) static u64 rapl_event_update(struct perf_event *event)
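The one-token change above moves the dereference: rapl_pmu is a per-CPU pointer, so __this_cpu_read(rapl_pmu->hw_unit) dereferences the pointer before it has been localised to this CPU (an invalid access), while __this_cpu_read(rapl_pmu)->hw_unit first fetches this CPU's pointer and only then reads hw_unit. A small sketch of the scaling itself, with the helper name and the example hw_unit value assumed purely for illustration:

    #include <linux/types.h>

    /* raw RAPL counts are in units of 2^-hw_unit Joules; shift them up to a
     * common 2^-32 Joule unit so userspace can apply ldexp(count, -32) */
    static inline u64 scale_to_2pow32(u64 v, int hw_unit)
    {
            return v << (32 - hw_unit);     /* e.g. v << 16 when hw_unit == 16 */
    }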

View File

@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
box->phys_id = phys_id; box->phys_id = phys_id;
box->pci_dev = pdev; box->pci_dev = pdev;
box->pmu = pmu; box->pmu = pmu;
uncore_box_init(box);
pci_set_drvdata(pdev, box); pci_set_drvdata(pdev, box);
raw_spin_lock(&uncore_box_lock); raw_spin_lock(&uncore_box_lock);
@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
pmu = &type->pmus[j]; pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu); box = *per_cpu_ptr(pmu->box, cpu);
/* called by uncore_cpu_init? */ /* called by uncore_cpu_init? */
if (box && box->phys_id >= 0) { if (box && box->phys_id >= 0)
uncore_box_init(box);
continue; continue;
}
for_each_online_cpu(k) { for_each_online_cpu(k) {
exist = *per_cpu_ptr(pmu->box, k); exist = *per_cpu_ptr(pmu->box, k);
@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
} }
} }
if (box) { if (box)
box->phys_id = phys_id; box->phys_id = phys_id;
uncore_box_init(box);
}
} }
} }
return 0; return 0;

View File

@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
return box->pmu->type->num_counters; return box->pmu->type->num_counters;
} }
static inline void uncore_box_init(struct intel_uncore_box *box)
{
if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
if (box->pmu->type->ops->init_box)
box->pmu->type->ops->init_box(box);
}
}
static inline void uncore_disable_box(struct intel_uncore_box *box) static inline void uncore_disable_box(struct intel_uncore_box *box)
{ {
if (box->pmu->type->ops->disable_box) if (box->pmu->type->ops->disable_box)
@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
static inline void uncore_enable_box(struct intel_uncore_box *box) static inline void uncore_enable_box(struct intel_uncore_box *box)
{ {
uncore_box_init(box);
if (box->pmu->type->ops->enable_box) if (box->pmu->type->ops->enable_box)
box->pmu->type->ops->enable_box(box); box->pmu->type->ops->enable_box(box);
} }
@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
return box->pmu->type->ops->read_counter(box, event); return box->pmu->type->ops->read_counter(box, event);
} }
static inline void uncore_box_init(struct intel_uncore_box *box)
{
if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
if (box->pmu->type->ops->init_box)
box->pmu->type->ops->init_box(box);
}
}
static inline bool uncore_box_is_fake(struct intel_uncore_box *box) static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{ {
return (box->phys_id < 0); return (box->phys_id < 0);
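Moving uncore_box_init() below uncore_enable_box() makes initialisation lazy: the init_box callback now runs on first enable, and test_and_set_bit() guarantees it fires exactly once even if several paths race to enable the same box. A minimal sketch of that init-once idiom, with invented names rather than the uncore structures:

    #include <linux/bitops.h>
    #include <linux/printk.h>

    #define BOX_FLAG_INITIATED      0

    struct demo_box {
            unsigned long flags;
    };

    /* safe to call from any racing path: only the caller that flips the
     * bit performs the one-time initialisation */
    static void demo_box_init_once(struct demo_box *box)
    {
            if (!test_and_set_bit(BOX_FLAG_INITIATED, &box->flags))
                    pr_debug("initialising box %p exactly once\n", box);
    }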

View File

@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
u16 cid, lid; u16 cid, lid;
u32 ldr, aid; u32 ldr, aid;
if (!kvm_apic_present(vcpu))
continue;
aid = kvm_apic_id(apic); aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR); ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr); cid = apic_cluster_id(new, ldr);

View File

@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE)) VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault); do_sigbus(regs, error_code, address, fault);
else if (fault & VM_FAULT_SIGSEGV)
bad_area_nosemaphore(regs, error_code, address);
else else
BUG(); BUG();
} }

View File

@ -1,39 +0,0 @@
#!/usr/bin/perl
#
# Calculate the amount of space needed to run the kernel, including room for
# the .bss and .brk sections.
#
# Usage:
# objdump -h a.out | perl calc_run_size.pl
use strict;
my $mem_size = 0;
my $file_offset = 0;
my $sections=" *[0-9]+ \.(?:bss|brk) +";
while (<>) {
if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
my $size = hex($1);
my $offset = hex($2);
$mem_size += $size;
if ($file_offset == 0) {
$file_offset = $offset;
} elsif ($file_offset != $offset) {
# BFD linker shows the same file offset in ELF.
# Gold linker shows them as consecutive.
next if ($file_offset + $mem_size == $offset + $size);
printf STDERR "file_offset: 0x%lx\n", $file_offset;
printf STDERR "mem_size: 0x%lx\n", $mem_size;
printf STDERR "offset: 0x%lx\n", $offset;
printf STDERR "size: 0x%lx\n", $size;
die ".bss and .brk are non-contiguous\n";
}
}
}
if ($file_offset == 0) {
die "Never found .bss or .brk file offset\n";
}
printf("%d\n", $mem_size + $file_offset);

View File

@ -0,0 +1,42 @@
#!/bin/sh
#
# Calculate the amount of space needed to run the kernel, including room for
# the .bss and .brk sections.
#
# Usage:
# objdump -h a.out | sh calc_run_size.sh
NUM='\([0-9a-fA-F]*[ \t]*\)'
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
if [ -z "$OUT" ] ; then
echo "Never found .bss or .brk file offset" >&2
exit 1
fi
OUT=$(echo ${OUT# })
sizeA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
sizeB=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetB=$(printf "%d" 0x${OUT%% *})
run_size=$(( $offsetA + $sizeA + $sizeB ))
# BFD linker shows the same file offset in ELF.
if [ "$offsetA" -ne "$offsetB" ] ; then
# Gold linker shows them as consecutive.
endB=$(( $offsetB + $sizeB ))
if [ "$endB" != "$run_size" ] ; then
printf "sizeA: 0x%x\n" $sizeA >&2
printf "offsetA: 0x%x\n" $offsetA >&2
printf "sizeB: 0x%x\n" $sizeB >&2
printf "offsetB: 0x%x\n" $offsetB >&2
echo ".bss and .brk are non-contiguous" >&2
exit 1
fi
fi
printf "%d\n" $run_size
exit 0
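The shell script reproduces the Perl logic: run_size = offset(.bss) + size(.bss) + size(.brk), with a consistency check for the two linker layouts (BFD reports both sections at the same file offset, Gold reports them consecutively). A worked example with invented section numbers, written in C only to keep the arithmetic explicit:

    #include <stdio.h>

    int main(void)
    {
            /* illustrative layout, not taken from a real vmlinux:
             * Gold places .brk at bss_off + bss_size */
            unsigned long bss_off  = 0x01000000, bss_size = 0x00200000;
            unsigned long brk_size = 0x00004000;
            unsigned long run_size = bss_off + bss_size + brk_size;

            printf("run_size = 0x%lx\n", run_size); /* 0x1204000 */
            return 0;
    }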

View File

@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) { if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) if (fault & VM_FAULT_OOM)
goto out_of_memory; goto out_of_memory;
else if (fault & VM_FAULT_SIGSEGV)
goto bad_area;
else if (fault & VM_FAULT_SIGBUS) else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus; goto do_sigbus;
BUG(); BUG();

View File

@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig" source "drivers/platform/Kconfig"
source "drivers/soc/Kconfig"
source "drivers/clk/Kconfig" source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig" source "drivers/hwspinlock/Kconfig"

View File

@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its * If an image has a non-zero parent overlap, get a reference to its
* parent. * parent.
* *
* We must get the reference before checking for the overlap to
* coordinate properly with zeroing the parent overlap in
* rbd_dev_v2_parent_info() when an image gets flattened. We
* drop it again if there is no overlap.
*
* Returns true if the rbd device has a parent with a non-zero * Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or * overlap and a reference for it was successfully taken, or
* false otherwise. * false otherwise.
*/ */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{ {
int counter; int counter = 0;
if (!rbd_dev->parent_spec) if (!rbd_dev->parent_spec)
return false; return false;
down_read(&rbd_dev->header_rwsem);
if (rbd_dev->parent_overlap)
counter = atomic_inc_return_safe(&rbd_dev->parent_ref); counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
if (counter > 0 && rbd_dev->parent_overlap) up_read(&rbd_dev->header_rwsem);
return true;
/* Image was flattened, but parent is not yet torn down */
if (counter < 0) if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow"); rbd_warn(rbd_dev, "parent reference overflow");
return false; return counter > 0;
} }
/* /*
@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/ */
if (rbd_dev->parent_overlap) { if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0; rbd_dev->parent_overlap = 0;
smp_mb();
rbd_dev_parent_put(rbd_dev); rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n", pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name); rbd_dev->disk->disk_name);
@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially. * treat it specially.
*/ */
rbd_dev->parent_overlap = overlap; rbd_dev->parent_overlap = overlap;
smp_mb();
if (!overlap) { if (!overlap) {
/* A null parent_spec indicates it's the initial probe */ /* A null parent_spec indicates it's the initial probe */
@ -5114,9 +5106,6 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{ {
struct rbd_image_header *header; struct rbd_image_header *header;
/* Drop parent reference unless it's already been done (or none) */
if (rbd_dev->parent_overlap)
rbd_dev_parent_put(rbd_dev); rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */ /* Free dynamic fields from the header, then zero it out */

View File

@ -26,6 +26,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include "kfd_priv.h" #include "kfd_priv.h"
#include "kfd_device_queue_manager.h" #include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768 #define MQD_SIZE_ALIGNED 768
@ -169,8 +170,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources; kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */ /* calculate max size of mqds needed for queues */
size = max_num_of_processes * size = max_num_of_queues_per_device *
max_num_of_queues_per_process *
kfd->device_info->mqd_size_aligned; kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */ /* add another 512KB for all other allocations on gart */

View File

@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock); mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
}
if (list_empty(&qpd->queues_list)) { if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q); retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) { if (retval != 0) {
@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list); list_add(&q->list, &qpd->queues_list);
dqm->queue_count++; dqm->queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
return 0; return 0;
} }
@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list)) if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q); deallocate_vmid(dqm, qpd, q);
dqm->queue_count--; dqm->queue_count--;
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
out: out:
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
return retval; return retval;
@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) { for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe; inx = i + first_pipe;
/*
* HPD buffer on GTT is allocated by amdkfd, no need to waste
* space in GTT for pipelines we don't initialize
*/
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES; pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr); pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */ /* = log2(bytes/4)-1 */
kfd2kgd->init_pipeline(dqm->dev->kgd, i, kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr); CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
} }
@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__); pr_debug("kfd: In %s\n", __func__);
retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE); retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0) if (retval != 0)
return retval; return retval;
@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__); pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock); mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock);
return -EPERM;
}
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list); list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++; dqm->queue_count++;
qpd->is_debug = true; qpd->is_debug = true;
@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--; dqm->queue_count--;
qpd->is_debug = false; qpd->is_debug = false;
execute_queues_cpsch(dqm, false); execute_queues_cpsch(dqm, false);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
} }
@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock); mutex_lock(&dqm->lock);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
retval = -EPERM;
goto out;
}
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP); mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) { if (mqd == NULL) {
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false); retval = execute_queues_cpsch(dqm, false);
} }
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
*/
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
out: out:
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
return retval; return retval;
@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
* type
*/
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mutex_unlock(&dqm->lock); mutex_unlock(&dqm->lock);
return 0; return 0;
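All of the create and destroy paths above converge on one pattern: a device-wide queue counter checked and updated under the dqm mutex, refusing new queues with -EPERM once max_num_of_queues_per_device is reached and counting every queue regardless of type or active state. A stripped-down sketch of that bounded-counter pattern, using invented names rather than the amdkfd structures:

    #include <linux/mutex.h>
    #include <linux/errno.h>

    struct demo_mgr {
            struct mutex lock;
            unsigned int total_queue_count;
            unsigned int limit;
    };

    static int demo_mgr_account_queue(struct demo_mgr *mgr)
    {
            int ret = 0;

            mutex_lock(&mgr->lock);
            if (mgr->total_queue_count >= mgr->limit)
                    ret = -EPERM;                 /* device-wide limit reached */
            else
                    mgr->total_queue_count++;     /* counted whether or not the queue is active */
            mutex_unlock(&mgr->lock);
            return ret;
    }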

View File

@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues; struct list_head queues;
unsigned int processes_count; unsigned int processes_count;
unsigned int queue_count; unsigned int queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate; unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues; unsigned int *allocated_queues;
unsigned int vmid_bitmap; unsigned int vmid_bitmap;

View File

@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy, MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy"); "Kernel cmdline parameter that defines the amdkfd scheduling policy");
int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT; int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_processes, int, 0444); module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_processes, MODULE_PARM_DESC(max_num_of_queues_per_device,
"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes"); "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
module_param(max_num_of_queues_per_process, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_process,
"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
bool kgd2kfd_init(unsigned interface_version, bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g, const struct kfd2kgd_calls *f2g,
@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
} }
/* Verify module parameters */ /* Verify module parameters */
if ((max_num_of_processes < 0) || if ((max_num_of_queues_per_device < 0) ||
(max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) { (max_num_of_queues_per_device >
pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n"); KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
return -1; pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
}
if ((max_num_of_queues_per_process < 0) ||
(max_num_of_queues_per_process >
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
return -1; return -1;
} }

View File

@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void) int kfd_pasid_init(void)
{ {
pasid_limit = max_num_of_processes; pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap) if (!pasid_bitmap)

View File

@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \ #define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
/* Kernel module parameter to specify maximum number of supported processes */
extern int max_num_of_processes;
#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512 #define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/* /*
* Kernel module parameter to specify maximum number of supported queues * Kernel module parameter to specify maximum number of supported queues per
* per process * device
*/ */
extern int max_num_of_queues_per_process; extern int max_num_of_queues_per_device;
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
(KFD_MAX_NUM_OF_PROCESSES * \
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048 #define KFD_KERNEL_QUEUE_SIZE 2048

View File

@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__); pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap, found = find_first_zero_bit(pqm->queue_slot_bitmap,
max_num_of_queues_per_process); KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found); pr_debug("kfd: the new slot id %lu\n", found);
if (found >= max_num_of_queues_per_process) { if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n", pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid); pqm->process->pasid);
return -ENOMEM; return -ENOMEM;
@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues); INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap = pqm->queue_slot_bitmap =
kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process, kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL); BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL) if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM; return -ENOMEM;
@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL; pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd, retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid); &q->properties.vmid);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q); print_queue(q);
break; break;
case KFD_QUEUE_TYPE_DIQ: case KFD_QUEUE_TYPE_DIQ:
@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
} }
if (retval != 0) { if (retval != 0) {
pr_err("kfd: error dqm create queue\n"); pr_debug("Error dqm create queue\n");
goto err_create_queue; goto err_create_queue;
} }
@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue: err_create_queue:
kfree(pqn); kfree(pqn);
err_allocate_pqn: err_allocate_pqn:
/* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap); clear_bit(*qid, pqm->queue_slot_bitmap);
if (list_empty(&pqm->queues))
dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval; return retval;
} }

View File

@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
} }
EXPORT_SYMBOL(drm_fb_helper_add_one_connector); EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
static void remove_from_modeset(struct drm_mode_set *set,
struct drm_connector *connector)
{
int i, j;
for (i = 0; i < set->num_connectors; i++) {
if (set->connectors[i] == connector)
break;
}
if (i == set->num_connectors)
return;
for (j = i + 1; j < set->num_connectors; j++) {
set->connectors[j - 1] = set->connectors[j];
}
set->num_connectors--;
/* because i915 is pissy about this..
* TODO maybe need to makes sure we set it back to !=NULL somewhere?
*/
if (set->num_connectors == 0)
set->fb = NULL;
}
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector) struct drm_connector *connector)
{ {
@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
} }
fb_helper->connector_count--; fb_helper->connector_count--;
kfree(fb_helper_connector); kfree(fb_helper_connector);
/* also cleanup dangling references to the connector: */
for (i = 0; i < fb_helper->crtc_count; i++)
remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
return 0; return 0;
} }
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector); EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);

View File

@ -32,6 +32,8 @@
struct tda998x_priv { struct tda998x_priv {
struct i2c_client *cec; struct i2c_client *cec;
struct i2c_client *hdmi; struct i2c_client *hdmi;
struct mutex mutex;
struct delayed_work dwork;
uint16_t rev; uint16_t rev;
uint8_t current_page; uint8_t current_page;
int dpms; int dpms;
@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg); uint8_t addr = REG2ADDR(reg);
int ret; int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg); ret = set_page(priv, reg);
if (ret < 0) if (ret < 0)
return ret; goto out;
ret = i2c_master_send(client, &addr, sizeof(addr)); ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0) if (ret < 0)
@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0) if (ret < 0)
goto fail; goto fail;
return ret; goto out;
fail: fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg); dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
return ret; return ret;
} }
@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg); buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt); memcpy(&buf[1], p, cnt);
mutex_lock(&priv->mutex);
ret = set_page(priv, reg); ret = set_page(priv, reg);
if (ret < 0) if (ret < 0)
return; goto out;
ret = i2c_master_send(client, buf, cnt + 1); ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0) if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
} }
static int static int
@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val}; uint8_t buf[] = {REG2ADDR(reg), val};
int ret; int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg); ret = set_page(priv, reg);
if (ret < 0) if (ret < 0)
return; goto out;
ret = i2c_master_send(client, buf, sizeof(buf)); ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0) if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
} }
static void static void
@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val}; uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret; int ret;
mutex_lock(&priv->mutex);
ret = set_page(priv, reg); ret = set_page(priv, reg);
if (ret < 0) if (ret < 0)
return; goto out;
ret = i2c_master_send(client, buf, sizeof(buf)); ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0) if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg); dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
out:
mutex_unlock(&priv->mutex);
} }
static void static void
@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24); reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
} }
/* handle HDMI connect/disconnect */
static void tda998x_hpd(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct tda998x_priv *priv =
container_of(dwork, struct tda998x_priv, dwork);
if (priv->encoder && priv->encoder->dev)
drm_kms_helper_hotplug_event(priv->encoder->dev);
}
/* /*
* only 2 interrupts may occur: screen plug/unplug and EDID read * only 2 interrupts may occur: screen plug/unplug and EDID read
*/ */
@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0; priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid); wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */ } else if (cec != 0) { /* HPD change */
if (priv->encoder && priv->encoder->dev) schedule_delayed_work(&priv->dwork, HZ/10);
drm_helper_hpd_irq_event(priv->encoder->dev);
} }
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */ /* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0); cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD); reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
if (priv->hdmi->irq) if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv); free_irq(priv->hdmi->irq, priv);
cancel_delayed_work_sync(&priv->dwork);
}
i2c_unregister_device(priv->cec); i2c_unregister_device(priv->cec);
} }
@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node; struct device_node *np = client->dev.of_node;
u32 video; u32 video;
int rev_lo, rev_hi, ret; int rev_lo, rev_hi, ret;
unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3); priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1); priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff; priv->current_page = 0xff;
priv->hdmi = client; priv->hdmi = client;
priv->cec = i2c_new_dummy(client->adapter, 0x34); /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
cec_addr = 0x34 + (client->addr & 0x03);
priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec) if (!priv->cec)
return -ENODEV; return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF; priv->dpms = DRM_MODE_DPMS_OFF;
mutex_init(&priv->mutex); /* protect the page access */
/* wake up the device: */ /* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS, cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI); CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) { if (client->irq) {
int irqf_trigger; int irqf_trigger;
/* init read EDID waitqueue */ /* init read EDID waitqueue and HDP work */
init_waitqueue_head(&priv->wq_edid); init_waitqueue_head(&priv->wq_edid);
INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */ /* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0); reg_read(priv, REG_INT_FLAGS_0);
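Two generic fixes carry this hunk series: a priv->mutex serialising each set_page()+transfer pair (the register map is paged, so selecting the page and accessing the register must be atomic with respect to other callers), and a delayed work item that debounces hot-plug interrupts before notifying DRM. A minimal sketch of the debounce half, with invented names standing in for the driver's context; INIT_DELAYED_WORK() is assumed to run at probe time as in the hunk above:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/printk.h>

    struct demo_hpd {
            struct delayed_work dwork;      /* INIT_DELAYED_WORK(&dwork, demo_hpd_work) at probe */
    };

    static void demo_hpd_work(struct work_struct *work)
    {
            struct demo_hpd *ctx =
                    container_of(to_delayed_work(work), struct demo_hpd, dwork);

            pr_info("hotplug state settled for %p\n", ctx); /* report once the line is stable */
    }

    /* called from the interrupt thread: a burst of HPD edges collapses into one event */
    static void demo_hpd_irq(struct demo_hpd *ctx)
    {
            schedule_delayed_work(&ctx->dwork, HZ / 10);    /* roughly 100 ms of settling time */
    }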

View File

@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT; dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n"); DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(IS_HSW_ULT(dev)); WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (IS_BROADWELL(dev)) {
dev_priv->pch_type = PCH_LPT;
dev_priv->pch_id =
INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
DRM_DEBUG_KMS("This is Broadwell, assuming "
"LynxPoint LP PCH\n");
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT; dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
WARN_ON(!IS_HSW_ULT(dev)); WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT; dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); DRM_DEBUG_KMS("Found SunrisePoint PCH\n");

View File

@ -2167,8 +2167,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00) (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
((INTEL_DEVID(dev) & 0xf) == 0x2 || \ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe)) (INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020) (INTEL_DEVID(dev) & 0x00F0) == 0x0020)

View File

@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj); u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val; uint64_t val;
/* Adjust fence size to match tiled area */
if (obj->tiling_mode != I915_TILING_NONE) {
uint32_t row_size = obj->stride *
(obj->tiling_mode == I915_TILING_Y ? 32 : 8);
size = (size / row_size) * row_size;
}
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) & val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32; 0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000; val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
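The added rounding trims the i965 fence to whole tile rows: row_size is the byte span of one row of tiles (stride times 32 lines for Y tiling, 8 for X), and the object size is truncated to a multiple of it so the fence does not cover padding past the last full row. A worked example with assumed numbers:

    /* Y-tiled object, stride 0x1000 bytes -> row_size = 0x1000 * 32 = 0x20000.
     * An object of 0x21000 bytes holds one full row plus padding, so the
     * fenced area becomes (0x21000 / 0x20000) * 0x20000 = 0x20000. */
    static unsigned int fenced_size(unsigned int size, unsigned int stride, int tiling_y)
    {
            unsigned int row_size = stride * (tiling_y ? 32 : 8);

            return (size / row_size) * row_size;
    }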
@ -4884,13 +4891,12 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++) for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i); i915_gem_l3_remap(&dev_priv->ring[RCS], i);
/* ret = i915_ppgtt_init_hw(dev);
* XXX: Contexts should only be initialized once. Doing a switch to the if (ret && ret != -EIO) {
* default context switch however is something we'd like to do after DRM_ERROR("PPGTT enable failed %d\n", ret);
* reset or thaw (the latter may not actually be necessary for HW, but i915_gem_cleanup_ringbuffer(dev);
* goes with our code better). Context switching requires rings (for }
* the do_switch), but before enabling PPGTT. So don't move this.
*/
ret = i915_gem_context_enable(dev_priv); ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) { if (ret && ret != -EIO) {
DRM_ERROR("Context enable failed %d\n", ret); DRM_ERROR("Context enable failed %d\n", ret);
@ -4899,12 +4905,6 @@ i915_gem_init_hw(struct drm_device *dev)
return ret; return ret;
} }
ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
}
return ret; return ret;
} }

View File

@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0); WARN_ON(panel->backlight.max == 0);
if (panel->backlight.level == 0) { if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max; panel->backlight.level = panel->backlight.max;
if (panel->backlight.device) if (panel->backlight.device)
panel->backlight.device->props.brightness = panel->backlight.device->props.brightness =

View File

@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) { for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) { if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr); value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) { } else if (flags & R600_PTE_VALID) {
value = addr; value = addr;
} else { } else {

View File

@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) { for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) { if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr); value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) { } else if (flags & R600_PTE_VALID) {
value = addr; value = addr;
} else { } else {

View File

@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r; return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page; rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev); return radeon_gart_table_ram_alloc(rdev);
} }
@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0); WREG32(RADEON_AIC_HI_ADDR, 0);
} }
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
return addr;
}
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags) uint64_t entry)
{ {
u32 *gtt = rdev->gart.ptr; u32 *gtt = rdev->gart.ptr;
gtt[i] = cpu_to_le32(lower_32_bits(addr)); gtt[i] = cpu_to_le32(lower_32_bits(entry));
} }
void r100_pci_gart_fini(struct radeon_device *rdev) void r100_pci_gart_fini(struct radeon_device *rdev)

View File

@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2) #define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3) #define R300_PTE_READABLE (1 << 3)
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
uint64_t addr, uint32_t flags)
{ {
void __iomem *ptr = rdev->gart.ptr;
addr = (lower_32_bits(addr) >> 8) | addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24); ((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ) if (flags & RADEON_GART_PAGE_READ)
@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE; addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP)) if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED; addr |= R300_PTE_UNSNOOPED;
return addr;
}
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t entry)
{
void __iomem *ptr = rdev->gart.ptr;
/* on x86 we want this to be CPU endian, on powerpc /* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way * on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */ * into VRAM - so no need for cpu_to_le32 on VRAM tables */
writel(addr, ((void __iomem *)ptr) + (i * 4)); writel(entry, ((void __iomem *)ptr) + (i * 4));
} }
int rv370_pcie_gart_init(struct radeon_device *rdev) int rv370_pcie_gart_init(struct radeon_device *rdev)
@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev); return radeon_gart_table_vram_alloc(rdev);
} }

View File

@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page * Dummy page
*/ */
struct radeon_dummy_page { struct radeon_dummy_page {
uint64_t entry;
struct page *page; struct page *page;
dma_addr_t addr; dma_addr_t addr;
}; };
@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages; unsigned num_cpu_pages;
unsigned table_size; unsigned table_size;
struct page **pages; struct page **pages;
dma_addr_t *pages_addr; uint64_t *pages_entry;
bool ready; bool ready;
}; };
@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */ /* gart */
struct { struct {
void (*tlb_flush)(struct radeon_device *rdev); void (*tlb_flush)(struct radeon_device *rdev);
uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i, void (*set_page)(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags); uint64_t entry);
} gart; } gart;
struct { struct {
int (*init)(struct radeon_device *rdev); int (*init)(struct radeon_device *rdev);
@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) #define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))

View File

@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n"); DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE; rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else { } else {
DRM_INFO("Forcing AGP to PCI mode\n"); DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI; rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page; rdev->asic->gart.set_page = &r100_pci_gart_set_page;
} }
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &r100_pci_gart_tlb_flush, .tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page, .set_page = &r100_pci_gart_set_page,
}, },
.ring = { .ring = {
@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &r100_pci_gart_tlb_flush, .tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page, .set_page = &r100_pci_gart_set_page,
}, },
.ring = { .ring = {
@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &r100_pci_gart_tlb_flush, .tlb_flush = &r100_pci_gart_tlb_flush,
.get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page, .set_page = &r100_pci_gart_set_page,
}, },
.ring = { .ring = {
@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush, .tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush, .tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle, .mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rs400_gart_tlb_flush, .tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page, .set_page = &rs400_gart_set_page,
}, },
.ring = { .ring = {
@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle, .mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rs600_gart_tlb_flush, .tlb_flush = &rs600_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle, .mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rs400_gart_tlb_flush, .tlb_flush = &rs400_gart_tlb_flush,
.get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page, .set_page = &rs400_gart_set_page,
}, },
.ring = { .ring = {
@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle, .mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush, .tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle, .mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = { .gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush, .tlb_flush = &rv370_pcie_gart_tlb_flush,
.get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page, .set_page = &rv370_pcie_gart_set_page,
}, },
.ring = { .ring = {
@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush, .tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush, .tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush, .tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush, .tlb_flush = &r600_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush, .tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush, .tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush, .tlb_flush = &evergreen_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.ring = { .ring = {
@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush, .tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.vm = { .vm = {
@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter, .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush, .tlb_flush = &cayman_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.vm = { .vm = {
@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter, .get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &si_pcie_gart_tlb_flush, .tlb_flush = &si_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.vm = { .vm = {
@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter, .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush, .tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.vm = { .vm = {
@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter, .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = { .gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush, .tlb_flush = &cik_pcie_gart_tlb_flush,
.get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page, .set_page = &rs600_gart_set_page,
}, },
.vm = { .vm = {

View File

@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev); int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev); void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags); uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring); void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev); int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev); int r100_irq_process(struct radeon_device *rdev);
@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence); struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p); extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags); uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev); extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev); extern void r300_set_reg_safe(struct radeon_device *rdev);
@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev); extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev); extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev);
uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags); uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev); int rs400_gart_init(struct radeon_device *rdev);
@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev); void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev); void rs600_gart_tlb_flush(struct radeon_device *rdev);
uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags); uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev); void rs600_bandwidth_update(struct radeon_device *rdev);

View File

@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL; rdev->dummy_page.page = NULL;
return -ENOMEM; return -ENOMEM;
} }
rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
RADEON_GART_PAGE_DUMMY);
return 0; return 0;
} }

Some files were not shown because too many files have changed in this diff