author    Tejun Heo    2012-03-05 15:15:19 -0600
committer Jens Axboe   2012-03-06 14:27:23 -0600
commit    03aa264ac15637b6f98374270bcdf31400965505 (patch)
tree      6fa9ca54d3f775fba19123790f6655158034a1d8  /block/cfq-iosched.c
parent    4eef3049986e8397d5003916aed8cad6567a5e02 (diff)
blkcg: let blkcg core manage per-queue blkg list and counter
With the previous patch moving blkg list heads and counters to request_queue and blkg, the logic to manage them in both policies is almost identical and can be moved to the blkcg core.

This patch moves blkg link logic into blkg_lookup_create(), implements common blkg unlink code in blkg_destroy(), and updates blkg_destroy_all() so that it's policy-specific and can skip the root group. The updated blkg_destroy_all() is now used both to clear a queue for bypassing and elevator switching, and to release all blkgs on queue exit.

This patch introduces a race window where policy [de]registration may race against queue blkg clearing. This can only be a problem on cfq unload and shouldn't be a real problem in practice (and we already have many other places where this race exists). Future patches will remove these unlikely races.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
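The blkcg-core side of this change lives in block/blk-cgroup.c and is not part of this file's diff. As a rough illustration of the consolidated unlink path the message describes, here is a minimal sketch only: the blkg_list[]/nr_blkgs[] fields, q_node[], blkg_put() and the blkg_destroy_all(q, plid, destroy_root) call are taken from this diff, while blkg->q, blkg->blkcg, blkio_root_cgroup and the exact function bodies are assumptions, not the real implementation.

/* Sketch only -- not the actual block/blk-cgroup.c code. */
static void blkg_destroy(struct blkio_group *blkg, enum blkio_policy_id plid)
{
	struct request_queue *q = blkg->q;	/* assumed back-pointer */

	lockdep_assert_held(q->queue_lock);

	/* Something is wrong if we try to unlink the same group twice. */
	WARN_ON_ONCE(list_empty(&blkg->q_node[plid]));
	list_del_init(&blkg->q_node[plid]);

	WARN_ON_ONCE(q->nr_blkgs[plid] <= 0);
	q->nr_blkgs[plid]--;

	/* Drop the reference taken when the blkg was created and linked. */
	blkg_put(blkg);
}

void blkg_destroy_all(struct request_queue *q, enum blkio_policy_id plid,
		      bool destroy_root)
{
	struct blkio_group *blkg, *n;

	spin_lock_irq(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list[plid], q_node[plid]) {
		/* Optionally leave the root group in place. */
		if (!destroy_root && blkg->blkcg == &blkio_root_cgroup)
			continue;
		blkg_destroy(blkg, plid);
	}

	spin_unlock_irq(q->queue_lock);
}

Consistent with this shape, the cfq_exit_queue() hunk below calls blkg_destroy_all() with queue_lock already released, so the core takes the lock itself.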
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--  block/cfq-iosched.c | 100
1 file changed, 6 insertions(+), 94 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e846803280a..dc73690dec4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1045,14 +1045,6 @@ static void cfq_update_blkio_group_weight(struct request_queue *q,
 	cfqg->needs_update = true;
 }
 
-static void cfq_link_blkio_group(struct request_queue *q,
-				 struct blkio_group *blkg)
-{
-	list_add(&blkg->q_node[BLKIO_POLICY_PROP],
-		 &q->blkg_list[BLKIO_POLICY_PROP]);
-	q->nr_blkgs[BLKIO_POLICY_PROP]++;
-}
-
 static void cfq_init_blkio_group(struct blkio_group *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1096,84 +1088,6 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 	blkg_get(cfqg_to_blkg(cfqg));
 }
 
-static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
-	struct blkio_group *blkg = cfqg_to_blkg(cfqg);
-
-	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(list_empty(&blkg->q_node[BLKIO_POLICY_PROP]));
-
-	list_del_init(&blkg->q_node[BLKIO_POLICY_PROP]);
-
-	BUG_ON(cfqd->queue->nr_blkgs[BLKIO_POLICY_PROP] <= 0);
-	cfqd->queue->nr_blkgs[BLKIO_POLICY_PROP]--;
-
-	/*
-	 * Put the reference taken at the time of creation so that when all
-	 * queues are gone, group can be destroyed.
-	 */
-	blkg_put(cfqg_to_blkg(cfqg));
-}
-
-static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
-{
-	struct request_queue *q = cfqd->queue;
-	struct blkio_group *blkg, *n;
-	bool empty = true;
-
-	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_PROP],
-				 q_node[BLKIO_POLICY_PROP]) {
-		/*
-		 * If cgroup removal path got to blk_group first and removed
-		 * it from cgroup list, then it will take care of destroying
-		 * cfqg also.
-		 */
-		if (!cfq_blkiocg_del_blkio_group(blkg))
-			cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
-		else
-			empty = false;
-	}
-	return empty;
-}
-
-/*
- * Blk cgroup controller notification saying that blkio_group object is being
- * delinked as associated cgroup object is going away. That also means that
- * no new IO will come in this group. So get rid of this group as soon as
- * any pending IO in the group is finished.
- *
- * This function is called under rcu_read_lock(). key is the rcu protected
- * pointer. That means @q is a valid request_queue pointer as long as we
- * are rcu read lock.
- *
- * @q was fetched from blkio_group under blkio_cgroup->lock. That means
- * it should not be NULL as even if elevator was exiting, cgroup deltion
- * path got to it first.
- */
-static void cfq_unlink_blkio_group(struct request_queue *q,
-				   struct blkio_group *blkg)
-{
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static struct elevator_type iosched_cfq;
-
-static bool cfq_clear_queue(struct request_queue *q)
-{
-	lockdep_assert_held(q->queue_lock);
-
-	/* shoot down blkgs iff the current elevator is cfq */
-	if (!q->elevator || q->elevator->type != &iosched_cfq)
-		return true;
-
-	return cfq_release_cfq_groups(q->elevator->elevator_data);
-}
-
 #else	/* GROUP_IOSCHED */
 static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 						struct blkio_cgroup *blkcg)
@@ -1186,8 +1100,6 @@ cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
 	cfqq->cfqg = cfqg;
 }
 
-static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
-
 #endif /* GROUP_IOSCHED */
 
 /*
@@ -3547,17 +3459,20 @@ static void cfq_exit_queue(struct elevator_queue *e)
 		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
 
 	cfq_put_async_queues(cfqd);
-	cfq_release_cfq_groups(cfqd);
+
+	spin_unlock_irq(q->queue_lock);
+
+	blkg_destroy_all(q, BLKIO_POLICY_PROP, true);
 
 #ifdef CONFIG_BLK_CGROUP
 	/*
 	 * If there are groups which we could not unlink from blkcg list,
 	 * wait for a rcu period for them to be freed.
 	 */
+	spin_lock_irq(q->queue_lock);
 	wait = q->nr_blkgs[BLKIO_POLICY_PROP];
-#endif
 	spin_unlock_irq(q->queue_lock);
-
+#endif
 	cfq_shutdown_timer_wq(cfqd);
 
 	/*
@@ -3794,9 +3709,6 @@ static struct elevator_type iosched_cfq = {
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
 		.blkio_init_group_fn =		cfq_init_blkio_group,
-		.blkio_link_group_fn =		cfq_link_blkio_group,
-		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
-		.blkio_clear_queue_fn =		cfq_clear_queue,
 		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
 	},
 	.plid = BLKIO_POLICY_PROP,
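With the link/unlink/clear hooks removed from blkio_policy_cfq above, the bookkeeping that cfq_link_blkio_group() used to perform now happens once in the blkcg core when blkg_lookup_create() instantiates a group. A hedged sketch of that step, generalized from the removed per-policy code; the helper name blkg_link() and anything not visible in this diff are assumptions:

/* Sketch only -- the real linking is done inside blkg_lookup_create(). */
static void blkg_link(struct request_queue *q, struct blkio_group *blkg,
		      enum blkio_policy_id plid)
{
	lockdep_assert_held(q->queue_lock);

	/* Same list and counter bookkeeping the per-policy hook used to do. */
	list_add(&blkg->q_node[plid], &q->blkg_list[plid]);
	q->nr_blkgs[plid]++;
}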