@@ -235,50 +235,15 @@ BPF_PROG(name, ##args)
 	__addr;							\
 })
 
+
 /*
- * BPF core and other generic helpers
+ * BPF declarations and helpers
  */
 
 /* list and rbtree */
 #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 
-/*
- * bpf_log2 - Compute the base 2 logarithm of a 32-bit exponential value.
- * @v: The value for which we're computing the base 2 logarithm.
- */
-static inline u32 bpf_log2(u32 v)
-{
-	u32 r;
-	u32 shift;
-
-	r = (v > 0xFFFF) << 4; v >>= r;
-	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
-	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
-	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
-	r |= (v >> 1);
-	return r;
-}
-
-/*
- * bpf_log2l - Compute the base 2 logarithm of a 64-bit exponential value.
- * @v: The value for which we're computing the base 2 logarithm.
- */
-static inline u32 bpf_log2l(u64 v)
-{
-	u32 hi = v >> 32;
-	if (hi)
-		return bpf_log2(hi) + 32 + 1;
-	else
-		return bpf_log2(v) + 1;
-}
-
-/* useful compiler attributes */
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define __maybe_unused __attribute__((__unused__))
-
-
 void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
 void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
 
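Usage note (not part of this patch): __contains() tags a BPF graph-root
declaration with the node type and the member that links it, and private()
drops a global into its own named data section so that a lock and the
structures it guards can share one. A minimal sketch of how the two macros
combine, with hypothetical names (elem, ELEM_LIST):

	struct elem {
		int value;
		struct bpf_list_node node;	/* member named by __contains() */
	};

	/* lock and list head share a section so the verifier can pair them */
	private(ELEM_LIST) struct bpf_spin_lock elem_lock;
	private(ELEM_LIST) struct bpf_list_head elem_list __contains(elem, node);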
@@ -311,6 +276,16 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
 void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
 struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
 
+/* css iteration */
+struct bpf_iter_css;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_new(struct bpf_iter_css *it,
+			    struct cgroup_subsys_state *start,
+			    unsigned int flags) __weak __ksym;
+extern struct cgroup_subsys_state *
+bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
+extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
+
 /* cpumask */
 struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
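Usage note (not part of this patch): the three bpf_iter_css_*() kfuncs added
above form an open-coded iterator and must be called as new, then next until
it returns NULL, then destroy, under the RCU read lock since the returned
css pointers are RCU-protected. A minimal sketch of a pre-order descendant
walk; walk_descendants() is a hypothetical caller, and
BPF_CGROUP_ITER_DESCENDANTS_PRE is the kernel's pre-order flag from
enum bpf_cgroup_iter_order:

	static void walk_descendants(struct cgroup *cgrp)
	{
		struct bpf_iter_css it;
		struct cgroup_subsys_state *css;

		bpf_rcu_read_lock();
		bpf_iter_css_new(&it, &cgrp->self, BPF_CGROUP_ITER_DESCENDANTS_PRE);
		while ((css = bpf_iter_css_next(&it))) {
			/* e.g. inspect css->cgroup here */
		}
		bpf_iter_css_destroy(&it);
		bpf_rcu_read_unlock();
	}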
@@ -344,6 +319,99 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
 
+
+/*
+ * Other helpers
+ */
+
+/* useful compiler attributes */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * READ/WRITE_ONCE() are from the kernel (include/asm-generic/rwonce.h). They
+ * prevent the compiler from caching, redoing, or reordering reads or writes.
+ */
+typedef __u8  __attribute__((__may_alias__)) __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(__u8_alias_t  *)res = *(volatile __u8_alias_t  *)p; break;
+	case 2: *(__u16_alias_t *)res = *(volatile __u16_alias_t *)p; break;
+	case 4: *(__u32_alias_t *)res = *(volatile __u32_alias_t *)p; break;
+	case 8: *(__u64_alias_t *)res = *(volatile __u64_alias_t *)p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8_alias_t  *)p = *(__u8_alias_t  *)res; break;
+	case 2: *(volatile __u16_alias_t *)p = *(__u16_alias_t *)res; break;
+	case 4: *(volatile __u32_alias_t *)p = *(__u32_alias_t *)res; break;
+	case 8: *(volatile __u64_alias_t *)p = *(__u64_alias_t *)res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+#define READ_ONCE(x)						\
+({								\
+	union { typeof(x) __val; char __c[1]; } __u =		\
+		{ .__c = { 0 } };				\
+	__read_once_size(&(x), __u.__c, sizeof(x));		\
+	__u.__val;						\
+})
+
+#define WRITE_ONCE(x, val)					\
+({								\
+	union { typeof(x) __val; char __c[1]; } __u =		\
+		{ .__val = (val) };				\
+	__write_once_size(&(x), __u.__c, sizeof(x));		\
+	__u.__val;						\
+})
+
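Usage note (not part of this patch): these are the kernel's generic
rwonce.h implementations carried over; the __may_alias__ typedefs plus
volatile casts force exactly one properly sized, non-torn access for
scalar types, falling back to memcpy() between barriers otherwise. A
minimal sketch of the lockless pairing they are meant for; shared_seq,
producer(), and consumer() are hypothetical:

	static u64 shared_seq;	/* written on one CPU, read on others */

	static void producer(void)
	{
		/* one real store; the compiler may not defer or split it */
		WRITE_ONCE(shared_seq, READ_ONCE(shared_seq) + 1);
	}

	static u64 consumer(void)
	{
		/* one real load; the compiler may not reuse a cached copy */
		return READ_ONCE(shared_seq);
	}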
+/*
+ * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u32(u32 v)
+{
+	u32 r;
+	u32 shift;
+
+	r = (v > 0xFFFF) << 4; v >>= r;
+	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+	r |= (v >> 1);
+	return r;
+}
+
+/*
+ * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u64(u64 v)
+{
+	u32 hi = v >> 32;
+	if (hi)
+		return log2_u32(hi) + 32 + 1;
+	else
+		return log2_u32(v) + 1;
+}
+
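Usage note (not part of this patch): as written, log2_u32() returns
floor(log2(v)) while log2_u64() returns floor(log2(v)) + 1 on both
branches, e.g. log2_u32(4096) == 12 but log2_u64(4096) == 13. The 1-based
result makes log2_u64() handy as a power-of-two histogram index; a small
sketch with hypothetical names (lat_hist, account_latency):

	static u64 lat_hist[64];

	static void account_latency(u64 delta_ns)
	{
		u32 idx = log2_u64(delta_ns);	/* 1..64 for any delta_ns */

		if (idx < 64)
			lat_hist[idx]++;
	}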
 #include "compat.bpf.h"
 
 #endif /* __SCX_COMMON_BPF_H */