@@ -31,6 +31,41 @@ static int vgic_its_commit_v0(struct vgic_its *its);
 static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 			     struct kvm_vcpu *filter_vcpu, bool needs_inv);
 
+#define vgic_its_read_entry_lock(i, g, valp, t)			\
+	({								\
+		int __sz = vgic_its_get_abi(i)->t##_esz;		\
+		struct kvm *__k = (i)->dev->kvm;			\
+		int __ret;						\
+									\
+		BUILD_BUG_ON(NR_ITS_ABIS == 1 &&			\
+			     sizeof(*(valp)) != ABI_0_ESZ);		\
+		if (NR_ITS_ABIS > 1 &&					\
+		    KVM_BUG_ON(__sz != sizeof(*(valp)), __k))		\
+			__ret = -EINVAL;				\
+		else							\
+			__ret = kvm_read_guest_lock(__k, (g),		\
+						    valp, __sz);	\
+		__ret;							\
+	})
+
+#define vgic_its_write_entry_lock(i, g, val, t)			\
+	({								\
+		int __sz = vgic_its_get_abi(i)->t##_esz;		\
+		struct kvm *__k = (i)->dev->kvm;			\
+		typeof(val) __v = (val);				\
+		int __ret;						\
+									\
+		BUILD_BUG_ON(NR_ITS_ABIS == 1 &&			\
+			     sizeof(__v) != ABI_0_ESZ);			\
+		if (NR_ITS_ABIS > 1 &&					\
+		    KVM_BUG_ON(__sz != sizeof(__v), __k))		\
+			__ret = -EINVAL;				\
+		else							\
+			__ret = vgic_write_guest_lock(__k, (g),		\
+						      &__v, __sz);	\
+		__ret;							\
+	})
+
 /*
  * Creates a new (reference to a) struct vgic_irq for a given LPI.
  * If this LPI is already mapped on another ITS, we increase its refcount
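The two new macros above derive the guest-table entry size from the table-type token: `t##_esz` pastes the final argument (`ite`, `dte`, or `cte`) onto `_esz` to select the matching field of the ABI descriptor, and the `BUILD_BUG_ON()`/`KVM_BUG_ON()` checks reject callers whose value does not match that width. Below is a minimal, self-contained sketch of the same token-pasting-plus-width-check pattern in plain user-space C; the `struct abi` and `write_entry()` names are hypothetical, invented for illustration, and are not part of the kernel code.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical ABI descriptor: one entry size per ITS table type. */
	struct abi {
		int ite_esz;
		int dte_esz;
		int cte_esz;
	};

	/*
	 * t##_esz pastes the table-type token onto "_esz" to pick the right
	 * field; the sizeof() comparison rejects values of the wrong width,
	 * mirroring the KVM_BUG_ON() check in vgic_its_write_entry_lock().
	 */
	#define write_entry(a, buf, val, t)				\
		({							\
			int __sz = (a)->t##_esz;			\
			typeof(val) __v = (val);			\
			int __ret = -1;					\
								\
			if (__sz == (int)sizeof(__v)) {			\
				memcpy((buf), &__v, __sz);		\
				__ret = 0;				\
			}						\
			__ret;						\
		})

	int main(void)
	{
		struct abi abi = { .ite_esz = 8, .dte_esz = 8, .cte_esz = 8 };
		unsigned char buf[8];
		uint64_t ite_val = 0x42;

		assert(write_entry(&abi, buf, ite_val, ite) == 0);      /* 8-byte value: accepted */
		assert(write_entry(&abi, buf, (uint32_t)1, dte) == -1); /* 4-byte value: rejected */
		printf("wrote %d-byte ITE\n", abi.ite_esz);
		return 0;
	}

The later hunks in this diff are the callers being converted to pass the table-type token (`ite`, `dte`, `cte`) instead of a separate `*_esz` argument.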
@@ -42,7 +77,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
 				     struct kvm_vcpu *vcpu)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
-	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
+	struct vgic_irq *irq = vgic_get_irq(kvm, intid), *oldirq;
 	unsigned long flags;
 	int ret;
 
@@ -419,7 +454,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 			last_byte_offset = byte_offset;
 		}
 
-		irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+		irq = vgic_get_irq(vcpu->kvm, intid);
 		if (!irq)
 			continue;
 
@@ -794,7 +829,7 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
 
 		its_free_ite(kvm, ite);
 
-		return vgic_its_write_entry_lock(its, gpa, 0, ite_esz);
+		return vgic_its_write_entry_lock(its, gpa, 0ULL, ite);
 	}
 
 	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
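The dummy value also changes from `0` to `0ULL` here (and in the MAPD and collection-table hunks below): the write macro now compares `sizeof(val)` against the ABI entry size, and a plain `0` is an int (4 bytes on arm64), so the literal must be a 64-bit type for the check to pass. In terms of the hypothetical `write_entry()` sketch above:

	write_entry(&abi, buf, 0, ite);    /* sizeof(int) == 4: fails the width check */
	write_entry(&abi, buf, 0ULL, ite); /* sizeof(unsigned long long) == 8: accepted */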
@@ -1143,7 +1178,6 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
 	bool valid = its_cmd_get_validbit(its_cmd);
 	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
 	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
-	int dte_esz = vgic_its_get_abi(its)->dte_esz;
 	struct its_device *device;
 	gpa_t gpa;
 
@@ -1168,7 +1202,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
 	 * is an error, so we are done in any case.
 	 */
 	if (!valid)
-		return vgic_its_write_entry_lock(its, gpa, 0, dte_esz);
+		return vgic_its_write_entry_lock(its, gpa, 0ULL, dte);
 
 	device = vgic_its_alloc_device(its, device_id, itt_addr,
 				       num_eventid_bits);
@@ -1288,7 +1322,7 @@ int vgic_its_invall(struct kvm_vcpu *vcpu)
 	unsigned long intid;
 
 	xa_for_each(&dist->lpi_xa, intid, irq) {
-		irq = vgic_get_irq(kvm, NULL, intid);
+		irq = vgic_get_irq(kvm, intid);
 		if (!irq)
 			continue;
 
@@ -1354,7 +1388,7 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
 		return 0;
 
 	xa_for_each(&dist->lpi_xa, intid, irq) {
-		irq = vgic_get_irq(kvm, NULL, intid);
+		irq = vgic_get_irq(kvm, intid);
 		if (!irq)
 			continue;
 
@@ -2090,7 +2124,7 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
  */
 static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
-			     struct its_ite *ite, gpa_t gpa, int ite_esz)
+			     struct its_ite *ite, gpa_t gpa)
 {
 	u32 next_offset;
 	u64 val;
@@ -2101,7 +2135,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 	       ite->collection->collection_id;
 	val = cpu_to_le64(val);
 
-	return vgic_its_write_entry_lock(its, gpa, val, ite_esz);
+	return vgic_its_write_entry_lock(its, gpa, val, ite);
 }
 
 /**
@@ -2201,7 +2235,7 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
 		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
 			return -EACCES;
 
-		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
+		ret = vgic_its_save_ite(its, device, ite, gpa);
 		if (ret)
 			return ret;
 	}
@@ -2240,10 +2274,9 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
  * @its: ITS handle
  * @dev: ITS device
  * @ptr: GPA
- * @dte_esz: device table entry size
  */
 static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
-			     gpa_t ptr, int dte_esz)
+			     gpa_t ptr)
 {
 	u64 val, itt_addr_field;
 	u32 next_offset;
@@ -2256,7 +2289,7 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
 	       (dev->num_eventid_bits - 1));
 	val = cpu_to_le64(val);
 
-	return vgic_its_write_entry_lock(its, ptr, val, dte_esz);
+	return vgic_its_write_entry_lock(its, ptr, val, dte);
 }
 
 /**
@@ -2332,10 +2365,8 @@ static int vgic_its_device_cmp(void *priv, const struct list_head *a,
  */
 static int vgic_its_save_device_tables(struct vgic_its *its)
 {
-	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
 	u64 baser = its->baser_device_table;
 	struct its_device *dev;
-	int dte_esz = abi->dte_esz;
 
 	if (!(baser & GITS_BASER_VALID))
 		return 0;
@@ -2354,7 +2385,7 @@ static int vgic_its_save_device_tables(struct vgic_its *its)
 		if (ret)
 			return ret;
 
-		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
+		ret = vgic_its_save_dte(its, dev, eaddr);
 		if (ret)
 			return ret;
 	}
@@ -2435,7 +2466,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
 
 static int vgic_its_save_cte(struct vgic_its *its,
 			     struct its_collection *collection,
-			     gpa_t gpa, int esz)
+			     gpa_t gpa)
 {
 	u64 val;
 
@@ -2444,23 +2475,23 @@ static int vgic_its_save_cte(struct vgic_its *its,
 			       collection->collection_id);
 	val = cpu_to_le64(val);
 
-	return vgic_its_write_entry_lock(its, gpa, val, esz);
+	return vgic_its_write_entry_lock(its, gpa, val, cte);
 }
 
 /*
  * Restore a collection entry into the ITS collection table.
  * Return +1 on success, 0 if the entry was invalid (which should be
  * interpreted as end-of-table), and a negative error value for generic errors.
  */
-static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
+static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa)
 {
 	struct its_collection *collection;
 	struct kvm *kvm = its->dev->kvm;
 	u32 target_addr, coll_id;
 	u64 val;
 	int ret;
 
-	ret = vgic_its_read_entry_lock(its, gpa, &val, esz);
+	ret = vgic_its_read_entry_lock(its, gpa, &val, cte);
 	if (ret)
 		return ret;
 	val = le64_to_cpu(val);
@@ -2507,7 +2538,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
 
 	list_for_each_entry(collection, &its->collection_list, coll_list) {
-		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
+		ret = vgic_its_save_cte(its, collection, gpa);
 		if (ret)
 			return ret;
 		gpa += cte_esz;
@@ -2521,7 +2552,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
 	 * table is not fully filled, add a last dummy element
 	 * with valid bit unset
 	 */
-	return vgic_its_write_entry_lock(its, gpa, 0, cte_esz);
+	return vgic_its_write_entry_lock(its, gpa, 0ULL, cte);
 }
 
 /*
@@ -2546,7 +2577,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
 
 	while (read < max_size) {
-		ret = vgic_its_restore_cte(its, gpa, cte_esz);
+		ret = vgic_its_restore_cte(its, gpa);
 		if (ret <= 0)
 			break;
 		gpa += cte_esz;