Skip to content

Commit c4ad98e

Browse files
author
Marc Zyngier
committed
KVM: arm64: Assume write fault on S1PTW permission fault on instruction fetch
KVM currently assumes that an instruction abort can never be a write. This is in general true, except when the abort is triggered by an S1PTW on instruction fetch that tries to update the S1 page tables (to set AF, for example). This can happen if the page tables have been paged out and brought back in without seeing a direct write to them (they are thus marked read only), and the fault handling code will make the PT executable(!) instead of writable. The guest gets stuck forever. In these conditions, the permission fault must be considered as a write so that the Stage-1 update can take place. This is essentially the I-side equivalent of the problem fixed by 60e21a0 ("arm64: KVM: Take S1 walks into account when determining S2 write faults"). Update kvm_is_write_fault() to return true on IABT+S1PTW, and introduce kvm_vcpu_trap_is_exec_fault(), which only returns true when not faulting on an S1 page-table walk. Additionally, kvm_vcpu_dabt_iss1tw() is renamed to kvm_vcpu_abt_iss1tw(), as the above makes it plain that it isn't specific to data aborts. Signed-off-by: Marc Zyngier <maz@kernel.org> Reviewed-by: Will Deacon <will@kernel.org> Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20200915104218.1284701-2-maz@kernel.org
1 parent 7b75cd5 commit c4ad98e

File tree

3 files changed

+13
-5
lines changed

3 files changed

+13
-5
lines changed

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
298298
return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
299299
}
300300

301-
static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
301+
static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
302302
{
303303
return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
304304
}
305305

306306
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
307307
{
308308
return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
309-
kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
309+
kvm_vcpu_abt_iss1tw(vcpu); /* AF/DBM update */
310310
}
311311

312312
static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
@@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
335335
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
336336
}
337337

338+
static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
339+
{
340+
return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
341+
}
342+
338343
static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
339344
{
340345
return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
@@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
372377

373378
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
374379
{
380+
if (kvm_vcpu_abt_iss1tw(vcpu))
381+
return true;
382+
375383
if (kvm_vcpu_trap_is_iabt(vcpu))
376384
return false;
377385

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -445,7 +445,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
445445
kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
446446
kvm_vcpu_dabt_isvalid(vcpu) &&
447447
!kvm_vcpu_abt_issea(vcpu) &&
448-
!kvm_vcpu_dabt_iss1tw(vcpu);
448+
!kvm_vcpu_abt_iss1tw(vcpu);
449449

450450
if (valid) {
451451
int ret = __vgic_v2_perform_cpuif_access(vcpu);

arch/arm64/kvm/mmu.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1843,7 +1843,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
18431843
struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
18441844

18451845
write_fault = kvm_is_write_fault(vcpu);
1846-
exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1846+
exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
18471847
VM_BUG_ON(write_fault && exec_fault);
18481848

18491849
if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -2125,7 +2125,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
21252125
goto out;
21262126
}
21272127

2128-
if (kvm_vcpu_dabt_iss1tw(vcpu)) {
2128+
if (kvm_vcpu_abt_iss1tw(vcpu)) {
21292129
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
21302130
ret = 1;
21312131
goto out_unlock;

0 commit comments

Comments
 (0)