Diffstat (limited to 'kernel/sched/fair.c')
 kernel/sched/fair.c | 36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 30d76a18ae1a..6f353de3f390 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2757,6 +2757,10 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
+	/* Trace CPU load, unless cfs_rq belongs to a non-root task_group */
+	if (cfs_rq == &rq_of(cfs_rq)->cfs)
+		trace_sched_load_avg_cpu(cpu_of(rq_of(cfs_rq)), cfs_rq);
+
 	return decayed || removed;
 }
 
@@ -2780,7 +2784,6 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 
 	if (entity_is_task(se))
 		trace_sched_load_avg_task(task_of(se), &se->avg);
-	trace_sched_load_avg_cpu(cpu, cfs_rq);
 }
 
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3958,6 +3961,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
+	/* Synchronize hierarchical throttle counter: */
+	if (unlikely(!cfs_rq->throttle_uptodate)) {
+		struct rq *rq = rq_of(cfs_rq);
+		struct cfs_rq *pcfs_rq;
+		struct task_group *tg;
+
+		cfs_rq->throttle_uptodate = 1;
+
+		/* Get closest up-to-date node, because leaves go first: */
+		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+			if (pcfs_rq->throttle_uptodate)
+				break;
+		}
+		if (tg) {
+			cfs_rq->throttle_count = pcfs_rq->throttle_count;
+			cfs_rq->throttled_clock_task = rq_clock_task(rq);
+		}
+	}
+
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4343,15 +4366,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
+			/* Avoid re-evaluating load for this entity: */
+			se = parent_entity(se);
 			/*
 			 * Bias pick_next to pick a task from this cfs_rq, as
 			 * p is sleeping when it is within its sched_slice.
 			 */
-			if (task_sleep && parent_entity(se))
-				set_next_buddy(parent_entity(se));
-
-			/* avoid re-evaluating load for this entity */
-			se = parent_entity(se);
+			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+				set_next_buddy(se);
 			break;
 		}
 		flags |= DEQUEUE_SLEEP;
@@ -4916,7 +4938,7 @@ long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
 }
 
 static int find_new_capacity(struct energy_env *eenv,
-			const struct sched_group_energy const *sge)
+			const struct sched_group_energy * const sge)
 {
 	int idx;
 	unsigned long util = group_max_util(eenv);
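
The check_enqueue_throttle() hunk above lazily brings a stale cfs_rq->throttle_count back in sync by walking up the task_group hierarchy to the closest ancestor whose counter is already up to date. Below is a minimal user-space sketch of that ancestor-walk pattern; struct node here is a hypothetical stand-in for the kernel's per-CPU cfs_rq/task_group pair, not the actual scheduler types.

	#include <stdio.h>

	struct node {
		struct node *parent;
		int uptodate;
		int throttle_count;
	};

	static void sync_throttle_count(struct node *n)
	{
		struct node *anc;

		if (n->uptodate)
			return;
		n->uptodate = 1;

		/* Get the closest up-to-date ancestor, because leaves are visited first. */
		for (anc = n->parent; anc; anc = anc->parent)
			if (anc->uptodate)
				break;

		if (anc)
			n->throttle_count = anc->throttle_count;
	}

	int main(void)
	{
		struct node root = { NULL, 1, 3 };
		struct node mid  = { &root, 0, 0 };
		struct node leaf = { &mid,  0, 0 };

		sync_throttle_count(&leaf);
		printf("leaf throttle_count = %d\n", leaf.throttle_count);	/* prints 3 */
		return 0;
	}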
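
The find_new_capacity() hunk only adjusts const placement in the sge parameter: in the old prototype the second const is a duplicate qualifier on the pointed-to struct, while the fixed prototype makes the pointer itself const as well. A minimal sketch of the difference, using int in place of struct sched_group_energy:

	#include <stdio.h>

	int main(void)
	{
		int a = 1, b = 2;

		/* Duplicate const on the pointee; the pointer itself stays mutable. */
		const int const *p = &a;
		/* Const pointer to const int: neither *q nor q may be modified. */
		const int * const q = &b;

		p = &b;		/* allowed: p is not a const pointer */
		/* q = &a; */	/* would not compile: q itself is const */

		printf("%d %d\n", *p, *q);
		return 0;
	}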