Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "Small release, the most interesting stuff is x86 nested virt
  improvements.

  x86:
   - userspace can now hide nested VMX features from guests
   - nested VMX can now run Hyper-V in a guest
   - support for AVX512_4VNNIW and AVX512_4FMAPS in KVM
   - infrastructure support for virtual Intel GPUs

  PPC:
   - support for KVM guests on POWER9
   - improved support for interrupt polling
   - optimizations and cleanups

  s390:
   - two small optimizations; more stuff is in flight and will land in 4.11

  ARM:
   - support for the GICv3 ITS on 32-bit platforms"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (94 commits)
  arm64: KVM: pmu: Reset PMSELR_EL0.SEL to a sane value before entering the guest
  KVM: arm/arm64: timer: Check for properly initialized timer on init
  KVM: arm/arm64: vgic-v2: Limit ITARGETSR bits to number of VCPUs
  KVM: x86: Handle the kthread worker using the new API
  KVM: nVMX: invvpid handling improvements
  KVM: nVMX: check host CR3 on vmentry and vmexit
  KVM: nVMX: introduce nested_vmx_load_cr3 and call it on vmentry
  KVM: nVMX: propagate errors from prepare_vmcs02
  KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT
  KVM: nVMX: load GUEST_EFER after GUEST_CR0 during emulated VM-entry
  KVM: nVMX: generate MSR_IA32_CR{0,4}_FIXED1 from guest CPUID
  KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation
  KVM: nVMX: support restore of VMX capability MSRs
  KVM: nVMX: generate non-true VMX MSRs based on true versions
  KVM: x86: Do not clear RFLAGS.TF when a singlestep trap occurs.
  KVM: x86: Add kvm_skip_emulated_instruction and use it.
  KVM: VMX: Move skip_emulated_instruction out of nested_vmx_check_vmcs12
  KVM: VMX: Reorder some skip_emulated_instruction calls
  KVM: x86: Add a return value to kvm_emulate_cpuid
  KVM: PPC: Book3S: Move prototypes for KVM functions into kvm_ppc.h
  ...

/*
 * This file contains low level CPU setup functions.
 *    Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/book3s/64/mmu-hash.h>

/* Entry: r3 = crap, r4 = ptr to cputable entry
 *
 * Note that we can be called twice for pseudo-PVRs
 */
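/*
 * POWER7 setup: if we are running in hypervisor mode, clear the LPID,
 * program a sane LPCR and flush the TLB. __init_hvmode_206 returns with
 * CR0.EQ set when MSR[HV] is clear, so the beqlr below skips all of the
 * hypervisor-only initialization in that case.
 */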
_GLOBAL(__setup_cpu_power7)
	mflr	r11
	bl	__init_hvmode_206
	mtlr	r11
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	bl	__init_LPCR
	bl	__init_tlb_power7
	mtlr	r11
	blr

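/*
 * POWER7 restore: same hypervisor-level re-initialization as the setup
 * path, but the MSR[HV] check is done inline rather than via
 * __init_hvmode_206.
 */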
_GLOBAL(__restore_cpu_power7)
	mflr	r11
	mfmsr	r3
	rldicl.	r0,r3,4,63
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	bl	__init_LPCR
	bl	__init_tlb_power7
	mtlr	r11
	blr

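/*
 * POWER8 setup: additionally initialize the FSCR and the PMU control
 * registers, set the LPCR_PECEDH wakeup bit and program the HFSCR
 * before flushing the TLB.
 */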
_GLOBAL(__setup_cpu_power8)
	mflr	r11
	bl	__init_FSCR
	bl	__init_PMU
	bl	__init_PMU_ISA207
	bl	__init_hvmode_206
	mtlr	r11
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	ori	r3, r3, LPCR_PECEDH
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power8
	bl	__init_PMU_HV
	bl	__init_PMU_HV_ISA207
	mtlr	r11
	blr

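/*
 * POWER8 restore: as above, with the MSR[HV] check done inline; the
 * FSCR and PMU registers are re-initialized unconditionally.
 */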
_GLOBAL(__restore_cpu_power8)
	mflr	r11
	bl	__init_FSCR
	bl	__init_PMU
	bl	__init_PMU_ISA207
	mfmsr	r3
	rldicl.	r0,r3,4,63
	mtlr	r11
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	ori	r3, r3, LPCR_PECEDH
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power8
	bl	__init_PMU_HV
	bl	__init_PMU_HV_ISA207
	mtlr	r11
	blr

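/*
 * POWER9 setup: also sets LPCR_PECE_HVEE and LPCR_HVICE (hypervisor
 * virtualization interrupt enable) in addition to LPCR_PECEDH; the
 * ISA 2.07-specific MMCRS/MMCRH initialization is not needed here.
 */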
_GLOBAL(__setup_cpu_power9)
	mflr	r11
	bl	__init_FSCR
	bl	__init_PMU
	bl	__init_hvmode_206
	mtlr	r11
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
	or	r3, r3, r4
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power9
	bl	__init_PMU_HV
	mtlr	r11
	blr

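/*
 * POWER9 restore: mirrors __setup_cpu_power9 with the MSR[HV] check
 * done inline.
 */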
_GLOBAL(__restore_cpu_power9)
	mflr	r11
	bl	__init_FSCR
	bl	__init_PMU
	mfmsr	r3
	rldicl.	r0,r3,4,63
	mtlr	r11
	beqlr
	li	r0,0
	mtspr	SPRN_LPID,r0
	mfspr	r3,SPRN_LPCR
	LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
	or	r3, r3, r4
	bl	__init_LPCR
	bl	__init_HFSCR
	bl	__init_tlb_power9
	bl	__init_PMU_HV
	mtlr	r11
	blr

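/*
 * Expects r4 to point to the cputable entry so CPU_FTR_HVMODE can be
 * cleared when MSR[HV] is not set. Returns with CR0.EQ set in that case
 * so that callers can simply beqlr past the hypervisor-only setup.
 */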
__init_hvmode_206:
	/* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
	mfmsr	r3
	rldicl.	r0,r3,4,63
	bnelr
	ld	r5,CPU_SPEC_FEATURES(r4)
	LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
	xor	r5,r5,r6
	std	r5,CPU_SPEC_FEATURES(r4)
	blr

__init_LPCR:
	/* Setup a sane LPCR:
	 *   Called with initial LPCR in R3
	 *
	 *   LPES = 0b01 (HSRR0/1 used for 0x500)
	 *   PECE = 0b111
	 *   DPFD = 4
	 *   HDICE = 0
	 *   VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
	 *   VRMASD = 0b10000 (L=1, LP=00)
	 *
	 * Other bits untouched for now
	 */
	li	r5,1
	rldimi	r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
	ori	r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
	li	r5,4
	rldimi	r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
	clrrdi	r3,r3,1		/* clear HDICE */
	li	r5,4
	rldimi	r3,r5, LPCR_VC_SH, 0
	li	r5,0x10
	rldimi	r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
	mtspr	SPRN_LPCR,r3
	isync
	blr

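/* Enable the TAR, DSCR and EBB facilities in the FSCR */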
__init_FSCR:
	mfspr	r3,SPRN_FSCR
	ori	r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
	mtspr	SPRN_FSCR,r3
	blr

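/*
 * Enable a wider set of facilities (TAR, TM, BHRB, PM, DSCR, VEC/VSX,
 * FP, EBB, MSGP) at the hypervisor level via the HFSCR.
 */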
__init_HFSCR:
	mfspr	r3,SPRN_HFSCR
	ori	r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
		      HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
	mtspr	SPRN_HFSCR,r3
	blr

/*
 * Clear the TLB using the specified IS form of tlbiel instruction
 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512.
 */
__init_tlb_power7:
	li	r6,POWER7_TLB_SETS
	mtctr	r6
	li	r7,0xc00	/* IS field = 0b11 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:	blr

__init_tlb_power8:
	li	r6,POWER8_TLB_SETS
	mtctr	r6
	li	r7,0xc00	/* IS field = 0b11 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:	blr

__init_tlb_power9:
	li	r6,POWER9_TLB_SETS_HASH
	mtctr	r6
	li	r7,0xc00	/* IS field = 0b11 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:	blr

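/*
 * PMU reset helpers: clear the hypervisor-level (MMCRC, MMCRH) and
 * supervisor-level (MMCRA, MMCR0-2, MMCRS) monitor mode control
 * registers so the PMU starts out in a known state.
 */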
__init_PMU_HV:
	li	r5,0
	mtspr	SPRN_MMCRC,r5
	blr

__init_PMU_HV_ISA207:
	li	r5,0
	mtspr	SPRN_MMCRH,r5
	blr

__init_PMU:
	li	r5,0
	mtspr	SPRN_MMCRA,r5
	mtspr	SPRN_MMCR0,r5
	mtspr	SPRN_MMCR1,r5
	mtspr	SPRN_MMCR2,r5
	blr

__init_PMU_ISA207:
	li	r5,0
	mtspr	SPRN_MMCRS,r5
	blr