MIPS: Export some tlbex internals for KVM to use
Export the TLB exception code generating functions so that KVM can
construct a fast TLB refill handler for guest context without
reinventing the wheel quite so much.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Ralf Baechle <ralf@linux-mips.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
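To illustrate why these exports are useful, below is a hypothetical sketch (not code from this commit) of the kind of refill handler a consumer such as KVM could assemble from the exported helpers. The buffer and its size, the function name, the choice of $k0/$k1 as scratch registers, and the trailing icache flush are all assumptions for illustration:

/*
 * Illustrative sketch only: assemble a minimal TLB refill sequence
 * from the helpers exported by this commit. Names, sizes and register
 * choices below are assumptions, not the real KVM handler.
 */
#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#define K0 26				/* $k0, kernel scratch GPR (assumed choice) */
#define K1 27				/* $k1, kernel scratch GPR (assumed choice) */

static u32 refill_buf[256];		/* assumed buffer for the generated code */

static void build_demo_tlb_refill(void)
{
	u32 *p = refill_buf;
	struct uasm_label labels[8], *l = labels;
	struct uasm_reloc relocs[8], *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Walk the page tables; leaves a pointer near the PTE pair in K1. */
#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1);
#else
	build_get_pgde32(&p, K0, K1);
#endif
	build_get_ptep(&p, K0, K1);

	/* Load the even/odd PTE pair into EntryLo0/EntryLo1. */
	build_update_entries(&p, K0, K1);

	/* Write a random TLB entry, absorbing the mtc0/eret hazards. */
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	/* Make the generated instructions visible to instruction fetch. */
	local_flush_icache_range((unsigned long)refill_buf,
				 (unsigned long)p);
}

The sequence mirrors the existing Linux refill handler: page table walk, PTE pair load, random TLB write, then eret, with build_tlb_write_entry taking care of the CPU-specific write hazards.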
arch/mips/include/asm/tlbex.h (new file, 26 lines)
@@ -0,0 +1,26 @@
+#ifndef __ASM_TLBEX_H
+#define __ASM_TLBEX_H
+
+#include <asm/uasm.h>
+
+/*
+ * Write random or indexed TLB entry, and care about the hazards from
+ * the preceding mtc0 and for the following eret.
+ */
+enum tlb_write_entry {
+	tlb_random,
+	tlb_indexed
+};
+
+extern int pgd_reg;
+
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr);
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr);
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep);
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode);
+
+#endif /* __ASM_TLBEX_H */
arch/mips/mm/tlbex.c
@@ -35,6 +35,7 @@
 #include <asm/war.h>
 #include <asm/uasm.h>
 #include <asm/setup.h>
+#include <asm/tlbex.h>
 
 static int mips_xpa_disabled;
 
@@ -345,7 +346,8 @@ static int allocate_kscratch(void)
 }
 
 static int scratch_reg;
-static int pgd_reg;
+int pgd_reg;
+EXPORT_SYMBOL_GPL(pgd_reg);
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
 static struct work_registers build_get_work_registers(u32 **p)
@@ -497,15 +499,9 @@ static void __maybe_unused build_tlb_probe_entry(u32 **p)
 	}
 }
 
-/*
- * Write random or indexed TLB entry, and care about the hazards from
- * the preceding mtc0 and for the following eret.
- */
-enum tlb_write_entry { tlb_random, tlb_indexed };
-
-static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
-				  struct uasm_reloc **r,
-				  enum tlb_write_entry wmode)
+void build_tlb_write_entry(u32 **p, struct uasm_label **l,
+			   struct uasm_reloc **r,
+			   enum tlb_write_entry wmode)
 {
 	void(*tlbw)(u32 **) = NULL;
 
@@ -628,6 +624,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(build_tlb_write_entry);
 
 static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
 							unsigned int reg)
@@ -782,9 +779,8 @@ static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pmd entry.
  */
-static void
-build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
-		 unsigned int tmp, unsigned int ptr)
+void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+		      unsigned int tmp, unsigned int ptr)
 {
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 	long pgdc = (long)pgd_current;
@@ -860,6 +856,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
 #endif
 }
+EXPORT_SYMBOL_GPL(build_get_pmde64);
 
 /*
  * BVADDR is the faulting address, PTR is scratch.
@@ -935,8 +932,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
  * TMP and PTR are scratch.
  * TMP will be clobbered, PTR will hold the pgd entry.
 */
-static void __maybe_unused
-build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	if (pgd_reg != -1) {
 		/* pgd is in pgd_reg */
@@ -961,6 +957,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
 	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
 }
+EXPORT_SYMBOL_GPL(build_get_pgde32);
 
 #endif /* !CONFIG_64BIT */
 
@@ -990,7 +987,7 @@ static void build_adjust_context(u32 **p, unsigned int ctx)
 	uasm_i_andi(p, ctx, ctx, mask);
 }
 
-static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
+void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 {
 	/*
 	 * Bug workaround for the Nevada. It seems as if under certain
@@ -1014,8 +1011,9 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
 	build_adjust_context(p, tmp);
 	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
 }
+EXPORT_SYMBOL_GPL(build_get_ptep);
 
-static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
+void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 {
 	int pte_off_even = 0;
 	int pte_off_odd = sizeof(pte_t);
@@ -1064,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 	UASM_i_MTC0(p, 0, C0_ENTRYLO1);
 	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
 }
+EXPORT_SYMBOL_GPL(build_update_entries);
 
 struct mips_huge_tlb_info {
 	int huge_pte;