Diffstat (limited to 'kernel/sched/deadline.c')
-rw-r--r--   kernel/sched/deadline.c   100
1 file changed, 88 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 772169e4775f..0b7e60de85f3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -498,13 +498,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
 }
 
 /*
- * When a -deadline entity is queued back on the runqueue, its runtime and
- * deadline might need updating.
+ * Revised wakeup rule [1]: for self-suspending tasks, rather than
+ * re-initializing the task's runtime and deadline, the revised wakeup
+ * rule adjusts the task's runtime so that the task does not overrun
+ * its density.
  *
- * The policy here is that we update the deadline of the entity only if:
- *  - the current deadline is in the past,
- *  - using the remaining runtime with the current deadline would make
- *    the entity exceed its bandwidth.
+ * Reasoning: a task may overrun the density if:
+ *      runtime / (deadline - t) > dl_runtime / dl_deadline
+ *
+ * Therefore, the runtime can be adjusted to:
+ *      runtime = (dl_runtime / dl_deadline) * (deadline - t)
+ *
+ * so that the runtime equals the maximum budget the task can use
+ * without breaking any rule.
+ *
+ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
+ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
+ */
+static void
+update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
+{
+        u64 laxity = dl_se->deadline - rq_clock(rq);
+
+        /*
+         * If the task has deadline < period and its deadline is in the past,
+         * it should already have been throttled before this check.
+         *
+         * See update_dl_entity() comments for further details.
+         */
+        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
+
+        dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
+}
+
+/*
+ * Regarding the deadline: a task with an implicit deadline has a relative
+ * deadline == relative period. A task with a constrained deadline has a
+ * relative deadline <= relative period.
+ *
+ * We support constrained deadline tasks. However, there are some restrictions
+ * that apply only to tasks which do not have an implicit deadline. See
+ * update_dl_entity() to know more about such restrictions.
+ *
+ * dl_is_implicit() returns true if the task has an implicit deadline.
+ */
+static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
+{
+        return dl_se->dl_deadline == dl_se->dl_period;
+}
+
+/*
+ * When a deadline entity is placed in the runqueue, its runtime and deadline
+ * might need to be updated. This is done by a CBS wakeup rule. There are two
+ * different rules: 1) the Original CBS; and 2) the Revisited CBS.
+ *
+ * When the task is starting a new period, the Original CBS is used. In this
+ * case, the runtime is replenished and a new absolute deadline is set.
+ *
+ * When a task is queued before the beginning of the next period, using the
+ * remaining runtime and deadline could make the entity overflow; see
+ * dl_entity_overflow() for more about runtime overflow. When such a case
+ * is detected, the runtime and deadline need to be updated.
+ *
+ * If the task has an implicit deadline, i.e., deadline == period, the Original
+ * CBS is applied: the runtime is replenished and a new absolute deadline is
+ * set, as in the previous cases.
+ *
+ * However, the Original CBS does not work properly for tasks with
+ * deadline < period, which are said to have a constrained deadline. By
+ * applying the Original CBS, a constrained deadline task would be able to run
+ * runtime/deadline in a period. With deadline < period, the task would
+ * overrun the runtime/period allowed bandwidth, breaking the admission test.
+ *
+ * In order to prevent this misbehavior, the Revisited CBS is used for
+ * constrained deadline tasks when a runtime overflow is detected. In the
+ * Revisited CBS, rather than replenishing the runtime and setting a new
+ * absolute deadline, the remaining runtime of the task is reduced to avoid
+ * a runtime overflow. Please refer to the comments of the
+ * update_dl_revised_wakeup() function for more about the Revisited CBS rule.
  */
 static void update_dl_entity(struct sched_dl_entity *dl_se,
                              struct sched_dl_entity *pi_se)
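
For reference, a minimal userspace sketch of the runtime adjustment above, assuming dl_density holds the 20-bit fixed-point ratio dl_runtime / dl_deadline (i.e. (dl_runtime << 20) / dl_deadline, matching the shift used in update_dl_revised_wakeup()); times are in nanoseconds and the parameters are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT        20      /* fixed-point shift assumed for dl_density */

int main(void)
{
        /* Constrained task: 5 ms of runtime, 10 ms relative deadline. */
        uint64_t dl_runtime  =  5ULL * 1000 * 1000;
        uint64_t dl_deadline = 10ULL * 1000 * 1000;
        uint64_t dl_density  = (dl_runtime << BW_SHIFT) / dl_deadline;

        /* The task wakes up 4 ms before its current absolute deadline. */
        uint64_t laxity = 4ULL * 1000 * 1000;

        /* runtime = (dl_runtime / dl_deadline) * (deadline - t) */
        uint64_t runtime = (dl_density * laxity) >> BW_SHIFT;

        /* Prints 2000000 ns: half the laxity, since the density is 0.5. */
        printf("adjusted runtime = %llu ns\n", (unsigned long long)runtime);
        return 0;
}

With these parameters the task is left with 2 ms of budget over the 4 ms of laxity, so it cannot exceed the 0.5 density it was admitted with.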
@@ -526,6 +597,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
 
         if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
             dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
+
+                if (unlikely(!dl_is_implicit(dl_se) &&
+                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
+                             !dl_se->dl_boosted)) {
+                        update_dl_revised_wakeup(dl_se, rq);
+                        return;
+                }
+
                 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                 dl_se->runtime = pi_se->dl_runtime;
         }
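
The comment block and the new branch above can be condensed into the following standalone sketch of the wakeup decision. The struct and function are local to this example (not kernel types), priority boosting is ignored, and the overflow check is the cross-multiplied form of the density test quoted in the comment; the in-kernel dl_entity_overflow() performs the same comparison with scaled operands.

#include <stdbool.h>
#include <stdint.h>

enum wakeup_action {
        WAKEUP_KEEP,            /* current runtime and deadline still fit */
        WAKEUP_REVISED,         /* revised rule: scale the remaining runtime */
        WAKEUP_REPLENISH,       /* Original CBS: full runtime, new deadline */
};

struct dl_params {
        uint64_t runtime, deadline;                     /* current values */
        uint64_t dl_runtime, dl_deadline, dl_period;    /* task parameters */
};

enum wakeup_action classify_wakeup(const struct dl_params *p, uint64_t now)
{
        bool deadline_in_past = p->deadline <= now;
        bool implicit = p->dl_deadline == p->dl_period;

        /* runtime / (deadline - now) > dl_runtime / dl_deadline, cross-multiplied */
        bool overflow = !deadline_in_past &&
                p->runtime * p->dl_deadline > (p->deadline - now) * p->dl_runtime;

        if (!deadline_in_past && !overflow)
                return WAKEUP_KEEP;

        /* Constrained task, deadline still in the future: only trim runtime. */
        if (!implicit && !deadline_in_past)
                return WAKEUP_REVISED;

        return WAKEUP_REPLENISH;
}

The WAKEUP_REVISED case corresponds to the !dl_is_implicit() && !dl_time_before() test in the hunk above, with the extra !dl_se->dl_boosted guard keeping the original behaviour for boosted tasks.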
@@ -753,6 +832,8 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
                 if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
                         return;
                 dl_se->dl_throttled = 1;
+                if (dl_se->runtime > 0)
+                        dl_se->runtime = 0;
         }
 }
 
@@ -1011,11 +1092,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
         __dequeue_dl_entity(dl_se);
 }
 
-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
-{
-        return dl_se->dl_deadline < dl_se->dl_period;
-}
-
 static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
         struct task_struct *pi_task = rt_mutex_get_top_task(p);
@@ -1047,7 +1123,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
          * If that is the case, the task will be throttled and
          * the replenishment timer will be set to the next period.
          */
-        if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
+        if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
                 dl_check_constrained_dl(&p->dl);
 
         /*
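
The comment above refers to the case handled by dl_check_constrained_dl(): a constrained task that wakes up after its absolute deadline but before the start of its next period must not run on leftover budget, so it is throttled (and, with the hunk above, its runtime is clamped to zero) until the replenishment timer fires. A small sketch of that window, assuming dl_check_constrained_dl() tests exactly this interval as its use here suggests; the names are local to this example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * True if a constrained task wakes up after its absolute deadline but
 * before its next period starts, i.e. in the window where it must be
 * throttled with zero runtime instead of running on leftover budget.
 */
bool constrained_wakeup_must_throttle(uint64_t now, uint64_t abs_deadline,
                                      uint64_t next_period_start)
{
        return now >= abs_deadline && now < next_period_start;
}

int main(void)
{
        /* Period 10 ms, relative deadline 6 ms, period started at t = 0. */
        uint64_t abs_deadline      =  6ULL * 1000 * 1000;
        uint64_t next_period_start = 10ULL * 1000 * 1000;
        uint64_t now               =  8ULL * 1000 * 1000;  /* wakes at 8 ms */

        /* Prints 1: the task sleeps until t = 10 ms and gets a fresh budget. */
        printf("%d\n", constrained_wakeup_must_throttle(now, abs_deadline,
                                                        next_period_start));
        return 0;
}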