about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Tejun Heo, 2012-04-13 15:11:26 -0500
committer: Jens Axboe, 2012-04-20 03:06:06 -0500
commitbc0d6501a844392ab6ad419d7ca5af4693b6afac (patch)
tree08375aff242a2efd35d830c7636ab61a3ec6a109 /block/blk-cgroup.c
parentf48ec1d7885281a9c6cd7779d61f321d1b1fd741 (diff)
downloadkernel-common-bc0d6501a844392ab6ad419d7ca5af4693b6afac.tar.gz
kernel-common-bc0d6501a844392ab6ad419d7ca5af4693b6afac.tar.xz
kernel-common-bc0d6501a844392ab6ad419d7ca5af4693b6afac.zip
blkcg: kill blkio_list and replace blkio_list_lock with a mutex
With blkio_policy[], blkio_list is redundant and hinders with per-queue policy activation. Remove it. Also, replace blkio_list_lock with a mutex blkcg_pol_mutex and let it protect the whole [un]registration. This is to prepare for per-queue policy activation and doesn't cause any functional difference. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--block/blk-cgroup.c32
1 file changed, 17 insertions, 15 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9449c383b7b..af665fe7f4f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -24,9 +24,7 @@
24 24
25#define MAX_KEY_LEN 100 25#define MAX_KEY_LEN 100
26 26
27static DEFINE_SPINLOCK(blkio_list_lock); 27static DEFINE_MUTEX(blkcg_pol_mutex);
28static LIST_HEAD(blkio_list);
29
30static DEFINE_MUTEX(all_q_mutex); 28static DEFINE_MUTEX(all_q_mutex);
31static LIST_HEAD(all_q_list); 29static LIST_HEAD(all_q_list);
32 30
@@ -311,8 +309,9 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
311 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); 309 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
312 struct blkio_group *blkg; 310 struct blkio_group *blkg;
313 struct hlist_node *n; 311 struct hlist_node *n;
312 int i;
314 313
315 spin_lock(&blkio_list_lock); 314 mutex_lock(&blkcg_pol_mutex);
316 spin_lock_irq(&blkcg->lock); 315 spin_lock_irq(&blkcg->lock);
317 316
318 /* 317 /*
@@ -321,15 +320,16 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
321 * anyway. If you get hit by a race, retry. 320 * anyway. If you get hit by a race, retry.
322 */ 321 */
323 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { 322 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
324 struct blkio_policy_type *pol; 323 for (i = 0; i < BLKIO_NR_POLICIES; i++) {
324 struct blkio_policy_type *pol = blkio_policy[i];
325 325
326 list_for_each_entry(pol, &blkio_list, list) 326 if (pol && pol->ops.blkio_reset_group_stats_fn)
327 if (pol->ops.blkio_reset_group_stats_fn)
328 pol->ops.blkio_reset_group_stats_fn(blkg); 327 pol->ops.blkio_reset_group_stats_fn(blkg);
328 }
329 } 329 }
330 330
331 spin_unlock_irq(&blkcg->lock); 331 spin_unlock_irq(&blkcg->lock);
332 spin_unlock(&blkio_list_lock); 332 mutex_unlock(&blkcg_pol_mutex);
333 return 0; 333 return 0;
334} 334}
335 335
@@ -732,20 +732,21 @@ void blkio_policy_register(struct blkio_policy_type *blkiop)
732{ 732{
733 struct request_queue *q; 733 struct request_queue *q;
734 734
735 mutex_lock(&blkcg_pol_mutex);
736
735 blkcg_bypass_start(); 737 blkcg_bypass_start();
736 spin_lock(&blkio_list_lock);
737 738
738 BUG_ON(blkio_policy[blkiop->plid]); 739 BUG_ON(blkio_policy[blkiop->plid]);
739 blkio_policy[blkiop->plid] = blkiop; 740 blkio_policy[blkiop->plid] = blkiop;
740 list_add_tail(&blkiop->list, &blkio_list);
741
742 spin_unlock(&blkio_list_lock);
743 list_for_each_entry(q, &all_q_list, all_q_node) 741 list_for_each_entry(q, &all_q_list, all_q_node)
744 update_root_blkg_pd(q, blkiop->plid); 742 update_root_blkg_pd(q, blkiop->plid);
743
745 blkcg_bypass_end(); 744 blkcg_bypass_end();
746 745
747 if (blkiop->cftypes) 746 if (blkiop->cftypes)
748 WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes)); 747 WARN_ON(cgroup_add_cftypes(&blkio_subsys, blkiop->cftypes));
748
749 mutex_unlock(&blkcg_pol_mutex);
749} 750}
750EXPORT_SYMBOL_GPL(blkio_policy_register); 751EXPORT_SYMBOL_GPL(blkio_policy_register);
751 752
@@ -753,19 +754,20 @@ void blkio_policy_unregister(struct blkio_policy_type *blkiop)
753{ 754{
754 struct request_queue *q; 755 struct request_queue *q;
755 756
757 mutex_lock(&blkcg_pol_mutex);
758
756 if (blkiop->cftypes) 759 if (blkiop->cftypes)
757 cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes); 760 cgroup_rm_cftypes(&blkio_subsys, blkiop->cftypes);
758 761
759 blkcg_bypass_start(); 762 blkcg_bypass_start();
760 spin_lock(&blkio_list_lock);
761 763
762 BUG_ON(blkio_policy[blkiop->plid] != blkiop); 764 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
763 blkio_policy[blkiop->plid] = NULL; 765 blkio_policy[blkiop->plid] = NULL;
764 list_del_init(&blkiop->list);
765 766
766 spin_unlock(&blkio_list_lock);
767 list_for_each_entry(q, &all_q_list, all_q_node) 767 list_for_each_entry(q, &all_q_list, all_q_node)
768 update_root_blkg_pd(q, blkiop->plid); 768 update_root_blkg_pd(q, blkiop->plid);
769 blkcg_bypass_end(); 769 blkcg_bypass_end();
770
771 mutex_unlock(&blkcg_pol_mutex);
770} 772}
771EXPORT_SYMBOL_GPL(blkio_policy_unregister); 773EXPORT_SYMBOL_GPL(blkio_policy_unregister);