Skip to content

Commit 1508bae

Browse files
committed
Merge tag 'kvmarm-fixes-6.13-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 changes for 6.13, part #2 - Constrain invalidations from GICR_INVLPIR to only affect the LPI INTID space - Set of robustness improvements to the management of vgic irqs and GIC ITS table entries - Fix compilation issue w/ CONFIG_CC_OPTIMIZE_FOR_SIZE=y where set_sysreg_masks() wasn't getting inlined, breaking check for a constant sysreg index - Correct KVM's vPMU overflow condition to match the architecture for hyp and non-hyp counters
2 parents 9ee62c3 + 13905f4 commit 1508bae

File tree

14 files changed

+172
-118
lines changed

14 files changed

+172
-118
lines changed

arch/arm64/kvm/nested.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -951,7 +951,7 @@ u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
951951
return v;
952952
}
953953

954-
static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
954+
static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
955955
{
956956
int i = sr - __SANITISED_REG_START__;
957957

arch/arm64/kvm/pmu-emul.c

Lines changed: 45 additions & 17 deletions
Original file line number | Diff line number | Diff line change
@@ -274,12 +274,23 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
274274
irq_work_sync(&vcpu->arch.pmu.overflow_work);
275275
}
276276

277-
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
277+
static u64 kvm_pmu_hyp_counter_mask(struct kvm_vcpu *vcpu)
278278
{
279-
unsigned int hpmn;
279+
unsigned int hpmn, n;
280280

281-
if (!vcpu_has_nv(vcpu) || idx == ARMV8_PMU_CYCLE_IDX)
282-
return false;
281+
if (!vcpu_has_nv(vcpu))
282+
return 0;
283+
284+
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
285+
n = vcpu->kvm->arch.pmcr_n;
286+
287+
/*
288+
* Programming HPMN to a value greater than PMCR_EL0.N is
289+
* CONSTRAINED UNPREDICTABLE. Make the implementation choice that an
290+
* UNKNOWN number of counters (in our case, zero) are reserved for EL2.
291+
*/
292+
if (hpmn >= n)
293+
return 0;
283294

284295
/*
285296
* Programming HPMN=0 is CONSTRAINED UNPREDICTABLE if FEAT_HPMN0 isn't
@@ -288,20 +299,22 @@ bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
288299
* implementation choice that all counters are included in the second
289300
* range reserved for EL2/EL3.
290301
*/
291-
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
292-
return idx >= hpmn;
302+
return GENMASK(n - 1, hpmn);
303+
}
304+
305+
bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx)
306+
{
307+
return kvm_pmu_hyp_counter_mask(vcpu) & BIT(idx);
293308
}
294309

295310
u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu)
296311
{
297312
u64 mask = kvm_pmu_implemented_counter_mask(vcpu);
298-
u64 hpmn;
299313

300314
if (!vcpu_has_nv(vcpu) || vcpu_is_el2(vcpu))
301315
return mask;
302316

303-
hpmn = SYS_FIELD_GET(MDCR_EL2, HPMN, __vcpu_sys_reg(vcpu, MDCR_EL2));
304-
return mask & ~GENMASK(vcpu->kvm->arch.pmcr_n - 1, hpmn);
317+
return mask & ~kvm_pmu_hyp_counter_mask(vcpu);
305318
}
306319

307320
u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu)
@@ -375,15 +388,30 @@ void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
375388
}
376389
}
377390

378-
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
391+
/*
392+
* Returns the PMU overflow state, which is true if there exists an event
393+
* counter where the values of the global enable control, PMOVSSET_EL0[n], and
394+
* PMINTENSET_EL1[n] are all 1.
395+
*/
396+
static bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
379397
{
380-
u64 reg = 0;
398+
u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
381399

382-
if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) {
383-
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
384-
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
385-
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
386-
}
400+
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
401+
402+
/*
403+
* PMCR_EL0.E is the global enable control for event counters available
404+
* to EL0 and EL1.
405+
*/
406+
if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E))
407+
reg &= kvm_pmu_hyp_counter_mask(vcpu);
408+
409+
/*
410+
* Otherwise, MDCR_EL2.HPME is the global enable control for event
411+
* counters reserved for EL2.
412+
*/
413+
if (!(vcpu_read_sys_reg(vcpu, MDCR_EL2) & MDCR_EL2_HPME))
414+
reg &= ~kvm_pmu_hyp_counter_mask(vcpu);
387415

388416
return reg;
389417
}
@@ -396,7 +424,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
396424
if (!kvm_vcpu_has_pmu(vcpu))
397425
return;
398426

399-
overflow = !!kvm_pmu_overflow_status(vcpu);
427+
overflow = kvm_pmu_overflow_status(vcpu);
400428
if (pmu->irq_level == overflow)
401429
return;
402430

arch/arm64/kvm/vgic/vgic-debug.c

Lines changed: 4 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -287,7 +287,10 @@ static int vgic_debug_show(struct seq_file *s, void *v)
287287
* Expect this to succeed, as iter_mark_lpis() takes a reference on
288288
* every LPI to be visited.
289289
*/
290-
irq = vgic_get_irq(kvm, vcpu, iter->intid);
290+
if (iter->intid < VGIC_NR_PRIVATE_IRQS)
291+
irq = vgic_get_vcpu_irq(vcpu, iter->intid);
292+
else
293+
irq = vgic_get_irq(kvm, iter->intid);
291294
if (WARN_ON_ONCE(!irq))
292295
return -EINVAL;
293296

arch/arm64/kvm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -322,7 +322,7 @@ int vgic_init(struct kvm *kvm)
322322
goto out;
323323

324324
for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
325-
struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, i);
325+
struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
326326

327327
switch (dist->vgic_model) {
328328
case KVM_DEV_TYPE_ARM_VGIC_V3:

arch/arm64/kvm/vgic/vgic-its.c

Lines changed: 54 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -31,6 +31,41 @@ static int vgic_its_commit_v0(struct vgic_its *its);
3131
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
3232
struct kvm_vcpu *filter_vcpu, bool needs_inv);
3333

34+
#define vgic_its_read_entry_lock(i, g, valp, t) \
35+
({ \
36+
int __sz = vgic_its_get_abi(i)->t##_esz; \
37+
struct kvm *__k = (i)->dev->kvm; \
38+
int __ret; \
39+
\
40+
BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
41+
sizeof(*(valp)) != ABI_0_ESZ); \
42+
if (NR_ITS_ABIS > 1 && \
43+
KVM_BUG_ON(__sz != sizeof(*(valp)), __k)) \
44+
__ret = -EINVAL; \
45+
else \
46+
__ret = kvm_read_guest_lock(__k, (g), \
47+
valp, __sz); \
48+
__ret; \
49+
})
50+
51+
#define vgic_its_write_entry_lock(i, g, val, t) \
52+
({ \
53+
int __sz = vgic_its_get_abi(i)->t##_esz; \
54+
struct kvm *__k = (i)->dev->kvm; \
55+
typeof(val) __v = (val); \
56+
int __ret; \
57+
\
58+
BUILD_BUG_ON(NR_ITS_ABIS == 1 && \
59+
sizeof(__v) != ABI_0_ESZ); \
60+
if (NR_ITS_ABIS > 1 && \
61+
KVM_BUG_ON(__sz != sizeof(__v), __k)) \
62+
__ret = -EINVAL; \
63+
else \
64+
__ret = vgic_write_guest_lock(__k, (g), \
65+
&__v, __sz); \
66+
__ret; \
67+
})
68+
3469
/*
3570
* Creates a new (reference to a) struct vgic_irq for a given LPI.
3671
* If this LPI is already mapped on another ITS, we increase its refcount
@@ -42,7 +77,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
4277
struct kvm_vcpu *vcpu)
4378
{
4479
struct vgic_dist *dist = &kvm->arch.vgic;
45-
struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
80+
struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
4681
unsigned long flags;
4782
int ret;
4883

@@ -419,7 +454,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
419454
last_byte_offset = byte_offset;
420455
}
421456

422-
irq = vgic_get_irq(vcpu->kvm, NULL, intid);
457+
irq = vgic_get_irq(vcpu->kvm, intid);
423458
if (!irq)
424459
continue;
425460

@@ -794,7 +829,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
794829

795830
its_free_ite(kvm, ite);
796831

797-
return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
832+
return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
798833
}
799834

800835
return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
@@ -1143,7 +1178,6 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
11431178
bool valid = its_cmd_get_validbit(its_cmd);
11441179
u8 num_eventid_bits = its_cmd_get_size(its_cmd);
11451180
gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1146-
int dte_esz = vgic_its_get_abi(its)->dte_esz;
11471181
struct its_device *device;
11481182
gpa_t gpa;
11491183

@@ -1168,7 +1202,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
11681202
* is an error, so we are done in any case.
11691203
*/
11701204
if (!valid)
1171-
return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
1205+
return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);
11721206

11731207
device = vgic_its_alloc_device(its, device_id, itt_addr,
11741208
num_eventid_bits);
@@ -1288,7 +1322,7 @@ int vgic_its_invall(struct kvm_vcpu *vcpu)
12881322
unsigned long intid;
12891323

12901324
xa_for_each(&dist->lpi_xa, intid, irq) {
1291-
irq = vgic_get_irq(kvm, NULL, intid);
1325+
irq = vgic_get_irq(kvm, intid);
12921326
if (!irq)
12931327
continue;
12941328

@@ -1354,7 +1388,7 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
13541388
return 0;
13551389

13561390
xa_for_each(&dist->lpi_xa, intid, irq) {
1357-
irq = vgic_get_irq(kvm, NULL, intid);
1391+
irq = vgic_get_irq(kvm, intid);
13581392
if (!irq)
13591393
continue;
13601394

@@ -2090,7 +2124,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
20902124
* vgic_its_save_ite - Save an interrupt translation entry at @gpa
20912125
*/
20922126
static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2093-
struct its_ite *ite, gpa_t gpa, int ite_esz)
2127+
struct its_ite *ite, gpa_t gpa)
20942128
{
20952129
u32 next_offset;
20962130
u64 val;
@@ -2101,7 +2135,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
21012135
ite->collection->collection_id;
21022136
val = cpu_to_le64(val);
21032137

2104-
return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
2138+
return vgic_its_write_entry_lock(its, gpa, val, ite);
21052139
}
21062140

21072141
/**
@@ -2201,7 +2235,7 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
22012235
if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
22022236
return -EACCES;
22032237

2204-
ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2238+
ret = vgic_its_save_ite(its, device, ite, gpa);
22052239
if (ret)
22062240
return ret;
22072241
}
@@ -2240,10 +2274,9 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
22402274
* @its: ITS handle
22412275
* @dev: ITS device
22422276
* @ptr: GPA
2243-
* @dte_esz: device table entry size
22442277
*/
22452278
static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2246-
gpa_t ptr, int dte_esz)
2279+
gpa_t ptr)
22472280
{
22482281
u64 val, itt_addr_field;
22492282
u32 next_offset;
@@ -2256,7 +2289,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
22562289
(dev->num_eventid_bits - 1));
22572290
val = cpu_to_le64(val);
22582291

2259-
return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
2292+
return vgic_its_write_entry_lock(its, ptr, val, dte);
22602293
}
22612294

22622295
/**
@@ -2332,10 +2365,8 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
23322365
*/
23332366
static int vgic_its_save_device_tables(struct vgic_its *its)
23342367
{
2335-
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
23362368
u64 baser = its->baser_device_table;
23372369
struct its_device *dev;
2338-
int dte_esz = abi->dte_esz;
23392370

23402371
if (!(baser & GITS_BASER_VALID))
23412372
return 0;
@@ -2354,7 +2385,7 @@ static int vgic_its_save_device_tables(struct vgic_its *its)
23542385
if (ret)
23552386
return ret;
23562387

2357-
ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2388+
ret = vgic_its_save_dte(its, dev, eaddr);
23582389
if (ret)
23592390
return ret;
23602391
}
@@ -2435,7 +2466,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
24352466

24362467
static int vgic_its_save_cte(struct vgic_its *its,
24372468
struct its_collection *collection,
2438-
gpa_t gpa, int esz)
2469+
gpa_t gpa)
24392470
{
24402471
u64 val;
24412472

@@ -2444,23 +2475,23 @@ static int vgic_its_save_cte(struct vgic_its *its,
24442475
collection->collection_id);
24452476
val = cpu_to_le64(val);
24462477

2447-
return vgic_its_write_entry_lock(its, gpa, val, esz);
2478+
return vgic_its_write_entry_lock(its, gpa, val, cte);
24482479
}
24492480

24502481
/*
24512482
* Restore a collection entry into the ITS collection table.
24522483
* Return +1 on success, 0 if the entry was invalid (which should be
24532484
* interpreted as end-of-table), and a negative error value for generic errors.
24542485
*/
2455-
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2486+
static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
24562487
{
24572488
struct its_collection *collection;
24582489
struct kvm *kvm = its->dev->kvm;
24592490
u32 target_addr, coll_id;
24602491
u64 val;
24612492
int ret;
24622493

2463-
ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
2494+
ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
24642495
if (ret)
24652496
return ret;
24662497
val = le64_to_cpu(val);
@@ -2507,7 +2538,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
25072538
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
25082539

25092540
list_for_each_entry(collection, &its->collection_list, coll_list) {
2510-
ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2541+
ret = vgic_its_save_cte(its, collection, gpa);
25112542
if (ret)
25122543
return ret;
25132544
gpa += cte_esz;
@@ -2521,7 +2552,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
25212552
* table is not fully filled, add a last dummy element
25222553
* with valid bit unset
25232554
*/
2524-
return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
2555+
return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
25252556
}
25262557

25272558
/*
@@ -2546,7 +2577,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
25462577
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
25472578

25482579
while (read < max_size) {
2549-
ret = vgic_its_restore_cte(its, gpa, cte_esz);
2580+
ret = vgic_its_restore_cte(its, gpa);
25502581
if (ret <= 0)
25512582
break;
25522583
gpa += cte_esz;

0 commit comments

Comments
 (0)