This repository was archived by the owner on Jun 18, 2024. It is now read-only.

Commit 68981ae

Merge pull request #223 from sched-ext/htejun/sync-scx
tools/sched_ext: Sync from scx repo (d3b34d1df7aabb66244607f1473ecc5774b5c9b3)
2 parents: caa7501 + 389f282 · commit 68981ae

File tree (5 files changed: +123, -54 lines)

tools/sched_ext/include/scx/common.bpf.h
tools/sched_ext/include/scx/common.h
tools/sched_ext/include/scx/user_exit_info.h
tools/sched_ext/scx_central.c
tools/sched_ext/scx_qmap.bpf.c

tools/sched_ext/include/scx/common.bpf.h

Lines changed: 105 additions & 37 deletions
@@ -235,50 +235,15 @@ BPF_PROG(name, ##args)
 	__addr;								\
 })
 
+
 /*
- * BPF core and other generic helpers
+ * BPF declarations and helpers
  */
 
 /* list and rbtree */
 #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 
-/*
- * bpf_log2 - Compute the base 2 logarithm of a 32-bit exponential value.
- * @v: The value for which we're computing the base 2 logarithm.
- */
-static inline u32 bpf_log2(u32 v)
-{
-	u32 r;
-	u32 shift;
-
-	r = (v > 0xFFFF) << 4; v >>= r;
-	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
-	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
-	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
-	r |= (v >> 1);
-	return r;
-}
-
-/*
- * bpf_log2l - Compute the base 2 logarithm of a 64-bit exponential value.
- * @v: The value for which we're computing the base 2 logarithm.
- */
-static inline u32 bpf_log2l(u64 v)
-{
-	u32 hi = v >> 32;
-	if (hi)
-		return bpf_log2(hi) + 32 + 1;
-	else
-		return bpf_log2(v) + 1;
-}
-
-/* useful compiler attributes */
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define __maybe_unused __attribute__((__unused__))
-
-
 void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
 void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
 
@@ -311,6 +276,16 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
 void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
 struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
 
+/* css iteration */
+struct bpf_iter_css;
+struct cgroup_subsys_state;
+extern int bpf_iter_css_new(struct bpf_iter_css *it,
+			    struct cgroup_subsys_state *start,
+			    unsigned int flags) __weak __ksym;
+extern struct cgroup_subsys_state *
+bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
+extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
+
 /* cpumask */
 struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
 struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
@@ -344,6 +319,99 @@ u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
 void bpf_rcu_read_lock(void) __ksym;
 void bpf_rcu_read_unlock(void) __ksym;
 
+
+/*
+ * Other helpers
+ */
+
+/* useful compiler attributes */
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define __maybe_unused __attribute__((__unused__))
+
+/*
+ * READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
+ * prevent compiler from caching, redoing or reordering reads or writes.
+ */
+typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
+	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
+	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
+	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
+	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
+	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
+	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+#define READ_ONCE(x)							\
+({									\
+	union { typeof(x) __val; char __c[1]; } __u =			\
+		{ .__c = { 0 } };					\
+	__read_once_size(&(x), __u.__c, sizeof(x));			\
+	__u.__val;							\
+})
+
+#define WRITE_ONCE(x, val)						\
+({									\
+	union { typeof(x) __val; char __c[1]; } __u =			\
+		{ .__val = (val) };					\
+	__write_once_size(&(x), __u.__c, sizeof(x));			\
+	__u.__val;							\
+})
+
+/*
+ * log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u32(u32 v)
+{
+	u32 r;
+	u32 shift;
+
+	r = (v > 0xFFFF) << 4; v >>= r;
+	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+	r |= (v >> 1);
+	return r;
+}
+
+/*
+ * log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 log2_u64(u64 v)
+{
+	u32 hi = v >> 32;
+	if (hi)
+		return log2_u32(hi) + 32 + 1;
+	else
+		return log2_u32(v) + 1;
+}
+
 #include "compat.bpf.h"
 
 #endif /* __SCX_COMMON_BPF_H */
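Note: the css iterator kfuncs added above are only declared by this sync; no caller is introduced in the commit. For orientation only, a minimal hypothetical sketch of how a BPF scheduler might walk a cgroup's css subtree with them is shown below. The helper name, the use of BPF_CGROUP_ITER_DESCENDANTS_PRE (assumed to be available via vmlinux.h), and the RCU bracketing are illustrative assumptions, not part of this change; the bpf_rcu_read_lock()/bpf_rcu_read_unlock() declarations it relies on do exist in this header.

/*
 * Hypothetical usage sketch (not part of this commit): count the css nodes
 * under @cgrp using the weak css iterator kfuncs declared above. Assumes
 * BPF_CGROUP_ITER_DESCENDANTS_PRE comes from vmlinux.h and that the walk
 * must be done under the RCU read lock.
 */
static __always_inline int count_descendant_csses(struct cgroup *cgrp)
{
	struct bpf_iter_css it;
	struct cgroup_subsys_state *css;
	int n = 0;

	bpf_rcu_read_lock();
	bpf_iter_css_new(&it, &cgrp->self, BPF_CGROUP_ITER_DESCENDANTS_PRE);
	while ((css = bpf_iter_css_next(&it)))
		n++;
	bpf_iter_css_destroy(&it);
	bpf_rcu_read_unlock();

	return n;
}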

tools/sched_ext/include/scx/common.h

Lines changed: 9 additions & 8 deletions
@@ -44,7 +44,8 @@ typedef int64_t s64;
 
 /**
  * RESIZE_ARRAY - Convenience macro for resizing a BPF array
- * @elfsec: the data section of the BPF program in which to the array exists
+ * @__skel: the skeleton containing the array
+ * @elfsec: the data section of the BPF program in which the array exists
  * @arr: the name of the array
  * @n: the desired array element count
  *
@@ -56,13 +57,13 @@ typedef int64_t s64;
  * for that custom data section so that it points to the newly memory mapped
  * region.
  */
-#define RESIZE_ARRAY(elfsec, arr, n)						\
-	do {									\
-		size_t __sz;							\
-		bpf_map__set_value_size(skel->maps.elfsec##_##arr,		\
-					sizeof(skel->elfsec##_##arr->arr[0]) * (n)); \
-		skel->elfsec##_##arr =						\
-			bpf_map__initial_value(skel->maps.elfsec##_##arr, &__sz); \
+#define RESIZE_ARRAY(__skel, elfsec, arr, n)					\
+	do {									\
+		size_t __sz;							\
+		bpf_map__set_value_size((__skel)->maps.elfsec##_##arr,		\
+					sizeof((__skel)->elfsec##_##arr->arr[0]) * (n)); \
+		(__skel)->elfsec##_##arr =					\
+			bpf_map__initial_value((__skel)->maps.elfsec##_##arr, &__sz); \
 	} while (0)
 
 #include "user_exit_info.h"

tools/sched_ext/include/scx/user_exit_info.h

Lines changed: 4 additions & 4 deletions
@@ -53,10 +53,10 @@ struct user_exit_info {
 #include <stdbool.h>
 
 /* no need to call the following explicitly if SCX_OPS_LOAD() is used */
-#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({					\
-	u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN;	\
-	(__skel)->rodata->__uei_name##_dump_len = __len;					\
-	RESIZE_ARRAY(data, __uei_name##_dump, __len);						\
+#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({					\
+	u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN;	\
+	(__skel)->rodata->__uei_name##_dump_len = __len;					\
+	RESIZE_ARRAY((__skel), data, __uei_name##_dump, __len);					\
 })
 
 #define UEI_EXITED(__skel, __uei_name) ({				\

tools/sched_ext/scx_central.c

Lines changed: 2 additions & 2 deletions
@@ -77,8 +77,8 @@ int main(int argc, char **argv)
 	}
 
 	/* Resize arrays so their element count is equal to cpu count. */
-	RESIZE_ARRAY(data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
-	RESIZE_ARRAY(data, cpu_started_at, skel->rodata->nr_cpu_ids);
+	RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
+	RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);
 
 	SCX_OPS_LOAD(skel, central_ops, scx_central, uei);

tools/sched_ext/scx_qmap.bpf.c

Lines changed: 3 additions & 3 deletions
@@ -579,7 +579,7 @@ struct {
 	__uint(max_entries, 1);
 	__type(key, u32);
 	__type(value, struct monitor_timer);
-} central_timer SEC(".maps");
+} monitor_timer SEC(".maps");
 
 /*
  * Print out the min, avg and max performance levels of CPUs every second to
@@ -692,11 +692,11 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(qmap_init)
 	if (ret)
 		return ret;
 
-	timer = bpf_map_lookup_elem(&central_timer, &key);
+	timer = bpf_map_lookup_elem(&monitor_timer, &key);
 	if (!timer)
 		return -ESRCH;
 
-	bpf_timer_init(timer, &central_timer, CLOCK_MONOTONIC);
+	bpf_timer_init(timer, &monitor_timer, CLOCK_MONOTONIC);
 	bpf_timer_set_callback(timer, monitor_timerfn);
 
 	return bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
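The hunk above rewires the renamed monitor_timer map into qmap_init(), but monitor_timerfn() itself sits outside the diff context. For orientation only, a minimal sketch of the usual self-rearming pattern for such a one-entry timer map is shown below; the body is illustrative and may differ from the actual callback in scx_qmap.bpf.c.

/* Illustrative self-rearming timer callback; not the code from this commit. */
static int monitor_timerfn(void *map, u32 *key, struct bpf_timer *timer)
{
	/* ... sample and report CPU performance levels here ... */

	/* Re-arm so the monitor fires again one second from now. */
	bpf_timer_start(timer, ONE_SEC_IN_NS, 0);
	return 0;
}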
