Skip to content

Commit 1607a48

Browse files
author
Yihao Wu
committed
alinux: sched: Fix regression caused by nr_uninterruptible
fix #27788368 — per-cgroup nr_uninterruptible tracking leads to a huge performance regression in hackbench. This patch deletes the nr_uninterruptible-related code for now, to address the performance regression issue. Fixes: 9410d31 ("alinux: cpuacct: Export nr_running & nr_uninterruptible") Fixes: 36da4fe ("alinux: sched: Maintain "nr_uninterruptible" in runqueue") Signed-off-by: Yihao Wu <wuyihao@linux.alibaba.com> Acked-by: Shanpei Chen <shanpeic@linux.alibaba.com>
1 parent 1d6103a commit 1607a48

File tree

5 files changed

+3
-155
lines changed

5 files changed

+3
-155
lines changed

kernel/sched/core.c

Lines changed: 3 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -744,28 +744,18 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
744744
p->sched_class->dequeue_task(rq, p, flags);
745745
}
746746

747-
static void update_nr_uninterruptible(struct task_struct *tsk, long inc)
748-
{
749-
if (tsk->sched_class->update_nr_uninterruptible)
750-
tsk->sched_class->update_nr_uninterruptible(tsk, inc);
751-
}
752-
753747
void activate_task(struct rq *rq, struct task_struct *p, int flags)
754748
{
755-
if (task_contributes_to_load(p)) {
756-
update_nr_uninterruptible(p, -1);
749+
if (task_contributes_to_load(p))
757750
rq->nr_uninterruptible--;
758-
}
759751

760752
enqueue_task(rq, p, flags);
761753
}
762754

763755
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
764756
{
765-
if (task_contributes_to_load(p)) {
766-
update_nr_uninterruptible(p, 1);
757+
if (task_contributes_to_load(p))
767758
rq->nr_uninterruptible++;
768-
}
769759

770760
dequeue_task(rq, p, flags);
771761
}
@@ -1700,10 +1690,8 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
17001690
lockdep_assert_held(&rq->lock);
17011691

17021692
#ifdef CONFIG_SMP
1703-
if (p->sched_contributes_to_load) {
1704-
update_nr_uninterruptible(p, -1);
1693+
if (p->sched_contributes_to_load)
17051694
rq->nr_uninterruptible--;
1706-
}
17071695

17081696
if (wake_flags & WF_MIGRATED)
17091697
en_flags |= ENQUEUE_MIGRATED;
@@ -6394,18 +6382,8 @@ void sched_move_task(struct task_struct *tsk)
63946382
if (running)
63956383
put_prev_task(rq, tsk);
63966384

6397-
/* decrease old group */
6398-
if ((!queued && task_contributes_to_load(tsk)) ||
6399-
(tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
6400-
update_nr_uninterruptible(tsk, -1);
6401-
64026385
sched_change_group(tsk, TASK_MOVE_GROUP);
64036386

6404-
/* increase new group after change */
6405-
if ((!queued && task_contributes_to_load(tsk)) ||
6406-
(tsk->state == TASK_WAKING && tsk->sched_contributes_to_load))
6407-
update_nr_uninterruptible(tsk, 1);
6408-
64096387
if (queued)
64106388
enqueue_task(rq, tsk, queue_flags);
64116389
if (running)

kernel/sched/cpuacct.c

Lines changed: 0 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -358,95 +358,6 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
358358
struct task_group, css);
359359
}
360360

361-
static inline unsigned long nr_uninterruptible(void)
362-
{
363-
unsigned long i, sum = 0;
364-
365-
for_each_possible_cpu(i)
366-
sum += cpu_rq(i)->nr_uninterruptible;
367-
368-
/*
369-
* Since we read the counters lockless, it might be slightly
370-
* inaccurate. Do not allow it to go below zero though:
371-
*/
372-
if (unlikely((long)sum < 0))
373-
sum = 0;
374-
375-
return sum;
376-
}
377-
378-
#ifdef CONFIG_CFS_BANDWIDTH
379-
static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
380-
{
381-
return tg->cfs_rq[cpu]->throttle_count;
382-
}
383-
#else
384-
static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
385-
{
386-
return false;
387-
}
388-
#endif
389-
390-
#ifdef CONFIG_RT_GROUP_SCHED
391-
static inline bool tg_rt_throttled(struct task_group *tg, int cpu)
392-
{
393-
return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted;
394-
}
395-
#endif
396-
397-
static unsigned long ca_running(struct cpuacct *ca, int cpu)
398-
{
399-
unsigned long nr_running = 0;
400-
struct cgroup *cgrp = ca->css.cgroup;
401-
struct task_group *tg;
402-
403-
/* Make sure it is only called for non-root cpuacct */
404-
if (ca == &root_cpuacct)
405-
return 0;
406-
407-
rcu_read_lock();
408-
tg = cgroup_tg(cgrp);
409-
if (unlikely(!tg))
410-
goto out;
411-
412-
if (!tg_cfs_throttled(tg, cpu))
413-
nr_running += tg->cfs_rq[cpu]->h_nr_running;
414-
#ifdef CONFIG_RT_GROUP_SCHED
415-
if (!tg_rt_throttled(tg, cpu))
416-
nr_running += tg->rt_rq[cpu]->rt_nr_running;
417-
#endif
418-
/* SCHED_DEADLINE doesn't support cgroup yet */
419-
420-
out:
421-
rcu_read_unlock();
422-
return nr_running;
423-
}
424-
425-
static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu)
426-
{
427-
unsigned long nr = 0;
428-
struct cgroup *cgrp = ca->css.cgroup;
429-
struct task_group *tg;
430-
431-
/* Make sure it is only called for non-root cpuacct */
432-
if (ca == &root_cpuacct)
433-
return nr;
434-
435-
rcu_read_lock();
436-
tg = cgroup_tg(cgrp);
437-
if (unlikely(!tg))
438-
goto out_rcu_unlock;
439-
440-
nr = tg->cfs_rq[cpu]->nr_uninterruptible;
441-
#ifdef CONFIG_RT_GROUP_SCHED
442-
nr += tg->rt_rq[cpu]->nr_uninterruptible;
443-
#endif
444-
445-
out_rcu_unlock:
446-
rcu_read_unlock();
447-
return nr;
448-
}
449-
450361
void cgroup_idle_start(struct sched_entity *se)
451362
{
452363
unsigned long flags;
@@ -625,7 +536,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
625536
u64 user, nice, system, idle, iowait, irq, softirq, steal, guest;
626537
u64 nr_migrations = 0;
627538
struct cpuacct_alistats *alistats;
628-
unsigned long nr_run = 0, nr_uninter = 0;
629539
int cpu;
630540

631541
user = nice = system = idle = iowait =
@@ -656,8 +566,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
656566

657567
alistats = per_cpu_ptr(ca->alistats, cpu);
658568
nr_migrations += alistats->nr_migrations;
659-
nr_run += ca_running(ca, cpu);
660-
nr_uninter += ca_uninterruptible(ca, cpu);
661569
}
662570
} else {
663571
struct kernel_cpustat *kcpustat;
@@ -677,9 +585,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
677585
alistats = per_cpu_ptr(ca->alistats, cpu);
678586
nr_migrations += alistats->nr_migrations;
679587
}
680-
681-
nr_run = nr_running();
682-
nr_uninter = nr_uninterruptible();
683588
}
684589

685590
seq_printf(sf, "user %lld\n", nsec_to_clock_t(user));
@@ -692,10 +597,6 @@ static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
692597
seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal));
693598
seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest));
694599

695-
seq_printf(sf, "nr_running %lld\n", (u64)nr_run);
696-
if ((long) nr_uninter < 0)
697-
nr_uninter = 0;
698-
seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter);
699600
seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations);
700601

701602
return 0;

kernel/sched/fair.c

Lines changed: 0 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -10311,16 +10311,6 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task
1031110311
return rr_interval;
1031210312
}
1031310313

10314-
#ifdef CONFIG_SCHED_SLI
10315-
static void update_nr_uninterruptible_fair(struct task_struct *p, long inc)
10316-
{
10317-
struct sched_entity *se = &p->se;
10318-
10319-
for_each_sched_entity(se)
10320-
cfs_rq_of(se)->nr_uninterruptible += inc;
10321-
}
10322-
#endif
10323-
1032410314
/*
1032510315
* All the scheduling class methods:
1032610316
*/
@@ -10364,7 +10354,6 @@ const struct sched_class fair_sched_class = {
1036410354
#endif
1036510355

1036610356
#ifdef CONFIG_SCHED_SLI
10367-
.update_nr_uninterruptible = update_nr_uninterruptible_fair,
1036810357
.update_nr_iowait = update_nr_iowait_fair,
1036910358
#endif
1037010359
};

kernel/sched/rt.c

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -2374,16 +2374,6 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
23742374
return 0;
23752375
}
23762376

2377-
#ifdef CONFIG_SCHED_SLI
2378-
static void update_nr_uninterruptible_rt(struct task_struct *p, long inc)
2379-
{
2380-
struct sched_rt_entity *se = &p->rt;
2381-
2382-
for_each_sched_rt_entity(se)
2383-
rt_rq_of_se(se)->nr_uninterruptible += inc;
2384-
}
2385-
#endif
2386-
23872377
const struct sched_class rt_sched_class = {
23882378
.next = &fair_sched_class,
23892379
.enqueue_task = enqueue_task_rt,
@@ -2414,10 +2404,6 @@ const struct sched_class rt_sched_class = {
24142404
.switched_to = switched_to_rt,
24152405

24162406
.update_curr = update_curr_rt,
2417-
2418-
#ifdef CONFIG_SCHED_SLI
2419-
.update_nr_uninterruptible = update_nr_uninterruptible_rt,
2420-
#endif
24212407
};
24222408

24232409
#ifdef CONFIG_RT_GROUP_SCHED

kernel/sched/sched.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -575,8 +575,6 @@ struct cfs_rq {
575575
#endif /* CONFIG_CFS_BANDWIDTH */
576576
#endif /* CONFIG_FAIR_GROUP_SCHED */
577577

578-
unsigned long nr_uninterruptible;
579-
580578
ALI_HOTFIX_RESERVE(1)
581579
ALI_HOTFIX_RESERVE(2)
582580
ALI_HOTFIX_RESERVE(3)
@@ -627,8 +625,6 @@ struct rt_rq {
627625
struct rq *rq;
628626
struct task_group *tg;
629627
#endif
630-
631-
unsigned long nr_uninterruptible;
632628
};
633629

634630
static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
@@ -1675,8 +1671,6 @@ struct sched_class {
16751671
#ifdef CONFIG_FAIR_GROUP_SCHED
16761672
void (*task_change_group)(struct task_struct *p, int type);
16771673
#endif
1678-
1679-
void (*update_nr_uninterruptible)(struct task_struct *p, long inc);
16801674
void (*update_nr_iowait)(struct task_struct *p, long inc);
16811675
};
16821676

0 commit comments

Comments
 (0)