commit 88a84ccccb3966bcc3f309cdb76092a9892c0260 upstream.
KVM doesn't expect any synchronous exceptions when executing, any such
exception leads to a panic(). AT instructions access the guest page
tables, and can cause a synchronous external abort to be taken.
The Arm ARM (Arm Architecture Reference Manual) is unclear on what should happen if the guest has configured
the hardware update of the access-flag, and a memory type in TCR_EL1 that
does not support atomic operations. B2.2.6 "Possible implementation
restrictions on using atomic instructions" from DDI0487F.a lists
synchronous external abort as a possible behaviour of atomic instructions
that target memory that isn't writeback cacheable, but the page table
walker may behave differently.
Make KVM robust to synchronous exceptions caused by AT instructions.
Add a get_user() style helper for AT instructions that returns -EFAULT
if an exception was generated.
While KVM's version of the exception table mixes synchronous and
asynchronous exceptions, only one of these can occur at each location.
Re-enter the guest when the AT instructions take an exception on the
assumption the guest will take the same exception. This isn't guaranteed
to make forward progress, as the AT instructions may always walk the page
tables, but guest execution may use the translation cached in the TLB.
This isn't a problem because, since commit 5dcd0fdbb492 ("KVM: arm64: Defer guest
entry when an asynchronous exception is pending"), KVM will return to the
host to process IRQs, allowing the rest of the system to keep running.
Cc: stable@vger.kernel.org # v4.19
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
160 lines
4.2 KiB
C
160 lines
4.2 KiB
C
/*
|
|
* Copyright (C) 2012,2013 - ARM Ltd
|
|
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
* GNU General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU General Public License
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
|
|
#ifndef __ARM_KVM_ASM_H__
|
|
#define __ARM_KVM_ASM_H__
|
|
|
|
#include <asm/virt.h>
|
|
|
|
/*
 * Per-vcpu flag for the state of the "workaround 2" (SSBD/Spectre-v4)
 * mitigation. NOTE(review): the consumer is not visible in this file —
 * presumably the world-switch code; confirm against its users.
 */
#define VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

/*
 * Exit codes handed back from EL2. Bit 31 is ORed on top of the base
 * exception code when an SError was also pending at exit time;
 * ARM_EXCEPTION_CODE()/ARM_SERROR_PENDING() split the two apart again.
 */
#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
|
|
|
|
#ifndef __ASSEMBLY__
|
|
|
|
#include <linux/mm.h>
|
|
|
|
/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym)						\
	({								\
		void *val = &sym;					\
		/* nVHE: EL2 maps the linear alias, not kernel VAs */	\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias(&sym);				\
		val;							\
	 })
|
|
|
|
struct kvm;
struct kvm_vcpu;

/* Bounds of the hyp init code, laid out by the linker script. */
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

/* The EL2 exception vector table. */
extern char __kvm_hyp_vector[];

/* TLB maintenance entry points, executed at EL2. */
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

/* The 64-bit virtual counter offset is passed as two 32-bit halves. */
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);

/* Guest entry for VHE systems (kernel running at EL2). */
extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);

/* Guest entry for non-VHE systems (hyp code mapped separately at EL2). */
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

/* GICv3 system-register accessors that must run at EL2. */
extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

/* Snapshot of MDCR_EL2, read from EL2. */
extern u32 __kvm_get_mdcr_el2(void);

extern u32 __init_stage2_translation(void);
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		/*							\
		 * Resolve @sym through its HYP-visible address, then	\
		 * add this CPU's per-cpu offset, which the hyp code	\
		 * keeps in TPIDR_EL2.					\
		 */							\
		void *__ptr = hyp_symbol_addr(sym);			\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(&sym))__ptr;					\
	})
|
|
|
|
/* Read this CPU's copy of the per-cpu variable @sym while at HYP. */
#define __hyp_this_cpu_read(sym)	(*__hyp_this_cpu_ptr(sym))
|
|
|
|
/*
 * Emit a KVM exception-table entry from inline assembly: an 8-byte
 * aligned pair of 32-bit PC-relative offsets (faulting location, fixup
 * location). Entries share the _asm_extable format but live in the
 * separate __kvm_ex_table section so they can be mapped at EL2; unlike
 * the regular table, this one is not sorted.
 */
#define __KVM_EXTABLE(from, to)						\
	" .pushsection __kvm_ex_table, \"a\"\n"				\
	" .align 3\n"							\
	" .long (" #from " - .), (" #to " - .)\n"			\
	" .popsection\n"
|
|
|
|
|
|
/*
 * get_user()-style wrapper around an AT (address translation)
 * instruction: returns 0 on success, -EFAULT if the walk took a
 * synchronous external abort. On success the caller reads the
 * translation result from PAR_EL1.
 *
 * SPSR_EL2/ELR_EL2 are saved up front because taking the abort
 * clobbers them; the __kvm_ex_table fixup at label 2 restores both
 * before reporting failure, as required by the extable contract.
 */
#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	" mrs %1, spsr_el2\n"						\
	" mrs %2, elr_el2\n"						\
	"1: at "at_op", %3\n"						\
	" isb\n"							\
	" b 9f\n"							\
	/* fixup: undo the exception's register damage, flag -EFAULT */	\
	"2: msr spsr_el2, %1\n"						\
	" msr elr_el2, %2\n"						\
	" mov %w0, %4\n"						\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
|
|
|
|
|
|
#else /* __ASSEMBLY__ */
|
|
|
|
/* Compute the HYP per-cpu address of \sym into \reg; clobbers \tmp. */
.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2		// per-cpu offset lives in TPIDR_EL2 at HYP
	add	\reg, \reg, \tmp
.endm
|
|
|
|
/* Load this CPU's copy of per-cpu variable \sym into \reg; clobbers \tmp. */
.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg, [\reg, \tmp]
.endm
|
|
|
|
/* Get this CPU's host CPU-state context into \reg; clobbers \tmp. */
.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
.endm
|
|
|
|
/*
 * Load the current vcpu pointer (as a HYP VA) into \vcpu.
 * \ctxt is clobbered: it ends up holding the host context address.
 */
.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
	kern_hyp_va \vcpu
.endm
|
|
|
|
/*
 * KVM extable for unexpected exceptions.
 * In the same format as _asm_extable, but output to a different section
 * so that it can be mapped to EL2. The KVM version is not sorted. The
 * caller must ensure:
 * x18 has the hypervisor value to allow any Shadow-Call-Stack
 * instrumented code to write to it, and that SPSR_EL2 and ELR_EL2 are
 * restored by the fixup.
 */
.macro _kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)	// PC-relative (fault, fixup) pair
	.popsection
.endm
|
|
|
|
#endif
|
|
|
|
#endif /* __ARM_KVM_ASM_H__ */
|