Skip to content
This repository was archived by the owner on Jun 18, 2024. It is now read-only.

Commit

Permalink
scx: Remove enum rq_onoff_reason
Browse files Browse the repository at this point in the history
The existing hotplug code is a bit brittle in that the sched_class->{online,
offline} callbacks can be invoked both from hotplug context, or from domain
rebuild context. A callback is only invoked in one of the two contexts, with
the context that runs the callback setting rq->online accordingly to avoid
doing the callback more times than necessary. Unfortunately, this causes commit
2125c00 ("cgroup/cpuset: Make cpuset hotplug processing synchronous") to
break hotplug for us because it makes the topology rq->online event happen
before the cpu hotplug rq->online event; thus preventing us from invoking the
cpu online callback in the scheduler.

This integration is fragile and hacky, so we'll instead call directly into
ext.c on the hotplug path. This will be added in a subsequent commit.

Signed-off-by: David Vernet <void@manifault.com>
  • Loading branch information
Byte-Lab committed Jun 5, 2024
1 parent c071b9d commit bf76f6f
Show file tree
Hide file tree
Showing 7 changed files with 24 additions and 33 deletions.
12 changes: 6 additions & 6 deletions kernel/sched/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -9670,7 +9670,7 @@ static inline void balance_hotplug_wait(void)

#endif /* CONFIG_HOTPLUG_CPU */

void set_rq_online(struct rq *rq, enum rq_onoff_reason reason)
void set_rq_online(struct rq *rq)
{
if (!rq->online) {
const struct sched_class *class;
Expand All @@ -9680,20 +9680,20 @@ void set_rq_online(struct rq *rq, enum rq_onoff_reason reason)

for_each_class(class) {
if (class->rq_online)
class->rq_online(rq, reason);
class->rq_online(rq);
}
}
}

void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason)
void set_rq_offline(struct rq *rq)
{
if (rq->online) {
const struct sched_class *class;

update_rq_clock(rq);
for_each_class(class) {
if (class->rq_offline)
class->rq_offline(rq, reason);
class->rq_offline(rq);
}

cpumask_clear_cpu(rq->cpu, rq->rd->online);
Expand Down Expand Up @@ -9789,7 +9789,7 @@ int sched_cpu_activate(unsigned int cpu)
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq, RQ_ONOFF_HOTPLUG);
set_rq_online(rq);
}
rq_unlock_irqrestore(rq, &rf);

Expand Down Expand Up @@ -9833,7 +9833,7 @@ int sched_cpu_deactivate(unsigned int cpu)
rq_lock_irqsave(rq, &rf);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq, RQ_ONOFF_HOTPLUG);
set_rq_offline(rq);
}
rq_unlock_irqrestore(rq, &rf);

Expand Down
4 changes: 2 additions & 2 deletions kernel/sched/deadline.c
Original file line number Diff line number Diff line change
Expand Up @@ -2606,7 +2606,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason)
static void rq_online_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_set_overload(rq);
Expand All @@ -2617,7 +2617,7 @@ static void rq_online_dl(struct rq *rq, enum rq_onoff_reason reason)
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq, enum rq_onoff_reason reason)
static void rq_offline_dl(struct rq *rq)
{
if (rq->dl.overloaded)
dl_clear_overload(rq);
Expand Down
16 changes: 6 additions & 10 deletions kernel/sched/ext.c
Original file line number Diff line number Diff line change
Expand Up @@ -3218,20 +3218,16 @@ static void handle_hotplug(struct rq *rq, bool online)
online ? "online" : "offline");
}

static void rq_online_scx(struct rq *rq, enum rq_onoff_reason reason)
static void rq_online_scx(struct rq *rq)
{
if (reason == RQ_ONOFF_HOTPLUG) {
handle_hotplug(rq, true);
rq->scx.flags |= SCX_RQ_ONLINE;
}
handle_hotplug(rq, true);
rq->scx.flags |= SCX_RQ_ONLINE;
}

static void rq_offline_scx(struct rq *rq, enum rq_onoff_reason reason)
static void rq_offline_scx(struct rq *rq)
{
if (reason == RQ_ONOFF_HOTPLUG) {
rq->scx.flags &= ~SCX_RQ_ONLINE;
handle_hotplug(rq, false);
}
rq->scx.flags &= ~SCX_RQ_ONLINE;
handle_hotplug(rq, false);
}

#else /* CONFIG_SMP */
Expand Down
4 changes: 2 additions & 2 deletions kernel/sched/fair.c
Original file line number Diff line number Diff line change
Expand Up @@ -12511,14 +12511,14 @@ void sched_balance_trigger(struct rq *rq)
nohz_balancer_kick(rq);
}

static void rq_online_fair(struct rq *rq, enum rq_onoff_reason reason)
static void rq_online_fair(struct rq *rq)
{
update_sysctl();

update_runtime_enabled(rq);
}

static void rq_offline_fair(struct rq *rq, enum rq_onoff_reason reason)
static void rq_offline_fair(struct rq *rq)
{
update_sysctl();

Expand Down
4 changes: 2 additions & 2 deletions kernel/sched/rt.c
Original file line number Diff line number Diff line change
Expand Up @@ -2425,7 +2425,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason)
static void rq_online_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_set_overload(rq);
Expand All @@ -2436,7 +2436,7 @@ static void rq_online_rt(struct rq *rq, enum rq_onoff_reason reason)
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq, enum rq_onoff_reason reason)
static void rq_offline_rt(struct rq *rq)
{
if (rq->rt.overloaded)
rt_clear_overload(rq);
Expand Down
13 changes: 4 additions & 9 deletions kernel/sched/sched.h
Original file line number Diff line number Diff line change
Expand Up @@ -2328,11 +2328,6 @@ extern const u32 sched_prio_to_wmult[40];

#define RETRY_TASK ((void *)-1UL)

enum rq_onoff_reason {
RQ_ONOFF_HOTPLUG, /* CPU is going on/offline */
RQ_ONOFF_TOPOLOGY, /* sched domain topology update */
};

struct affinity_context {
const struct cpumask *new_mask;
struct cpumask *user_mask;
Expand Down Expand Up @@ -2371,8 +2366,8 @@ struct sched_class {

void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);

void (*rq_online)(struct rq *rq, enum rq_onoff_reason reason);
void (*rq_offline)(struct rq *rq, enum rq_onoff_reason reason);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);

struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
#endif
Expand Down Expand Up @@ -2913,8 +2908,8 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
raw_spin_rq_unlock(rq1);
}

extern void set_rq_online (struct rq *rq, enum rq_onoff_reason reason);
extern void set_rq_offline(struct rq *rq, enum rq_onoff_reason reason);
extern void set_rq_online (struct rq *rq);
extern void set_rq_offline(struct rq *rq);
extern bool sched_smp_initialized;

#else /* CONFIG_SMP */
Expand Down
4 changes: 2 additions & 2 deletions kernel/sched/topology.c
Original file line number Diff line number Diff line change
Expand Up @@ -496,7 +496,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
old_rd = rq->rd;

if (cpumask_test_cpu(rq->cpu, old_rd->online))
set_rq_offline(rq, RQ_ONOFF_TOPOLOGY);
set_rq_offline(rq);

cpumask_clear_cpu(rq->cpu, old_rd->span);

Expand All @@ -514,7 +514,7 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)

cpumask_set_cpu(rq->cpu, rd->span);
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq, RQ_ONOFF_TOPOLOGY);
set_rq_online(rq);

rq_unlock_irqrestore(rq, &rf);

Expand Down

0 comments on commit bf76f6f

Please sign in to comment.