author		Zhao Heming	2020-11-19 05:41:34 -0600
committer	Greg Kroah-Hartman	2020-12-30 04:54:25 -0600
commit		d27d1942e173c1c719e00a596f5db60221b05b14 (patch)
tree		66ba1061b434f2497103836509eb862988350a76
parent		3ace8d52ee4afac46e4d718612efa09591bd7506 (diff)
md/cluster: fix deadlock when node is doing resync job

commit bca5b0658020be90b6b504ca514fd80110204f71 upstream.

md-cluster uses MD_CLUSTER_SEND_LOCK so that a node can exclusively send a
msg. While sending a msg, a node can concurrently receive a msg from another
node. When a node is doing a resync job, grabbing token_lockres:EX may
trigger a deadlock:
```
nodeA                       nodeB
--------------------     --------------------
a.
send METADATA_UPDATED
held token_lockres:EX
                          b.
                          md_do_sync
                           resync_info_update
                             send RESYNCING
                              + set MD_CLUSTER_SEND_LOCK
                              + wait for holding token_lockres:EX

                          c.
                          mdadm /dev/md0 --remove /dev/sdg
                           + held reconfig_mutex
                           + send REMOVE
                              + wait_event(MD_CLUSTER_SEND_LOCK)

                          d.
                          recv_daemon //METADATA_UPDATED from A
                           process_metadata_update
                            + (mddev_trylock(mddev) ||
                               MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD)
                              //this time, both return false forever
```
Explanation:
a. A sends METADATA_UPDATED. This blocks any other node from sending a msg.
b. B does the sync job, which sends RESYNCING at intervals. This blocks
   waiting to hold the token_lockres:EX lock.
c. B does "mdadm --remove", which sends REMOVE. This is blocked by step <b>:
   MD_CLUSTER_SEND_LOCK is 1.
d. B receives the METADATA_UPDATED msg sent from A in step <a>. This is
   blocked by step <c>: the mddev lock is held there, so mddev_trylock() in
   the wait_event condition can never succeed. (btw,
   MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD stays ZERO in this scenario.)

There is a similar deadlock in commit 0ba959774e93 ("md-cluster: use sync
way to handle METADATA_UPDATED msg"). In that commit, step c is "update sb";
in this patch, step c is "mdadm --remove".

To fix this issue, we can follow the solution used in metadata_update_start(),
which grabs the token lock in the same way. lock_comm() can use the same steps
to avoid the deadlock: move the MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD handling
from lock_token() to lock_comm(). This slightly enlarges the window in which
MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD is set, but it is safe and breaks the
deadlock (a condensed sketch of the resulting flow follows the sign-offs
below).

Repro steps (I only triggered it 3 times in hundreds of tests); two nodes
share 3 iSCSI luns: sdg/sdh/sdi, each lun is 1GB:
```
ssh root@node2 "mdadm -S --scan"
mdadm -S --scan
for i in {g,h,i};do dd if=/dev/zero of=/dev/sd$i oflag=direct bs=1M \
 count=20; done

mdadm -C /dev/md0 -b clustered -e 1.2 -n 2 -l mirror /dev/sdg /dev/sdh \
 --bitmap-chunk=1M
ssh root@node2 "mdadm -A /dev/md0 /dev/sdg /dev/sdh"

sleep 5

mkfs.xfs /dev/md0
mdadm --manage --add /dev/md0 /dev/sdi
mdadm --wait /dev/md0
mdadm --grow --raid-devices=3 /dev/md0

mdadm /dev/md0 --fail /dev/sdg
mdadm /dev/md0 --remove /dev/sdg
mdadm --grow --raid-devices=2 /dev/md0
```
The test script hangs when executing "mdadm --remove":
```
# dump stacks by "echo t > /proc/sysrq-trigger"
md0_cluster_rec D    0  5329      2 0x80004000
Call Trace:
 __schedule+0x1f6/0x560
 ? _cond_resched+0x2d/0x40
 ? schedule+0x4a/0xb0
 ? process_metadata_update.isra.0+0xdb/0x140 [md_cluster]
 ? wait_woken+0x80/0x80
 ? process_recvd_msg+0x113/0x1d0 [md_cluster]
 ? recv_daemon+0x9e/0x120 [md_cluster]
 ? md_thread+0x94/0x160 [md_mod]
 ? wait_woken+0x80/0x80
 ? md_congested+0x30/0x30 [md_mod]
 ? kthread+0x115/0x140
 ? __kthread_bind_mask+0x60/0x60
 ? ret_from_fork+0x1f/0x40

mdadm           D    0  5423      1 0x00004004
Call Trace:
 __schedule+0x1f6/0x560
 ? __schedule+0x1fe/0x560
 ? schedule+0x4a/0xb0
 ? lock_comm.isra.0+0x7b/0xb0 [md_cluster]
 ? wait_woken+0x80/0x80
 ? remove_disk+0x4f/0x90 [md_cluster]
 ? hot_remove_disk+0xb1/0x1b0 [md_mod]
 ? md_ioctl+0x50c/0xba0 [md_mod]
 ? wait_woken+0x80/0x80
 ? blkdev_ioctl+0xa2/0x2a0
 ? block_ioctl+0x39/0x40
 ? ksys_ioctl+0x82/0xc0
 ? __x64_sys_ioctl+0x16/0x20
 ? do_syscall_64+0x5f/0x150
 ? entry_SYSCALL_64_after_hwframe+0x44/0xa9

md0_resync      D    0  5425      2 0x80004000
Call Trace:
 __schedule+0x1f6/0x560
 ? schedule+0x4a/0xb0
 ? dlm_lock_sync+0xa1/0xd0 [md_cluster]
 ? wait_woken+0x80/0x80
 ? lock_token+0x2d/0x90 [md_cluster]
 ? resync_info_update+0x95/0x100 [md_cluster]
 ? raid1_sync_request+0x7d3/0xa40 [raid1]
 ? md_do_sync.cold+0x737/0xc8f [md_mod]
 ? md_thread+0x94/0x160 [md_mod]
 ? md_congested+0x30/0x30 [md_mod]
 ? kthread+0x115/0x140
 ? __kthread_bind_mask+0x60/0x60
 ? ret_from_fork+0x1f/0x40
```
At last, thanks to Xiao for the solution.

Cc: stable@vger.kernel.org
Signed-off-by: Zhao Heming <heming.zhao@suse.com>
Suggested-by: Xiao Ni <xni@redhat.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
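
In short, the fix makes MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD bracket the whole
lock_comm() sequence (including the wait for MD_CLUSTER_SEND_LOCK) instead of
only the DLM call inside lock_token(). Below is a condensed C sketch of the
two functions as they end up after this patch; error handling and the full
in-code comment are abbreviated, so treat the hunks further down as the
authoritative version:
```
static int lock_token(struct md_cluster_info *cinfo)
{
	int error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);

	if (error)
		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
		       __func__, __LINE__, error);
	else
		mutex_lock(&cinfo->recv_mutex);	/* lock the receive sequence */
	return error;
}

static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
{
	int rv, set_bit = 0;

	/*
	 * Hint to the recv daemon that it need not take the mddev lock
	 * while this caller may block below holding reconfig_mutex.
	 */
	if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
				      &cinfo->state)) {
		rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
					   &cinfo->state);
		WARN_ON_ONCE(rv);
		md_wakeup_thread(cinfo->mddev->thread);
		set_bit = 1;
	}

	/* Take the send channel, then the TOKEN lock via lock_token(). */
	wait_event(cinfo->wait,
		   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
	rv = lock_token(cinfo);
	if (set_bit)
		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
				 &cinfo->state);
	return rv;
}
```
Because lock_comm() can now fail, the callers touched in the diff below
(sendmsg(), update_size(), add_new_disk() and hot_remove_disk() via
remove_disk()) check its return value instead of assuming success.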
-rw-r--r--	drivers/md/md-cluster.c	67
-rw-r--r--	drivers/md/md.c	6
2 files changed, 42 insertions(+), 31 deletions(-)
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 4aaf4820b6f6..f0e64e76fd79 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -664,9 +664,27 @@ out:
  * Takes the lock on the TOKEN lock resource so no other
  * node can communicate while the operation is underway.
  */
-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
+static int lock_token(struct md_cluster_info *cinfo)
 {
-	int error, set_bit = 0;
+	int error;
+
+	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
+	if (error) {
+		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
+				__func__, __LINE__, error);
+	} else {
+		/* Lock the receive sequence */
+		mutex_lock(&cinfo->recv_mutex);
+	}
+	return error;
+}
+
+/* lock_comm()
+ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
+ */
+static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
+{
+	int rv, set_bit = 0;
 	struct mddev *mddev = cinfo->mddev;
 
 	/*
@@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
 	 */
 	if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
 				      &cinfo->state)) {
-		error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
-					      &cinfo->state);
-		WARN_ON_ONCE(error);
+		rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
+					      &cinfo->state);
+		WARN_ON_ONCE(rv);
 		md_wakeup_thread(mddev->thread);
 		set_bit = 1;
 	}
-	error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
-	if (set_bit)
-		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
 
-	if (error)
-		pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
-				__func__, __LINE__, error);
-
-	/* Lock the receive sequence */
-	mutex_lock(&cinfo->recv_mutex);
-	return error;
-}
-
-/* lock_comm()
- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
- */
-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
-{
 	wait_event(cinfo->wait,
 		   !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
-
-	return lock_token(cinfo, mddev_locked);
+	rv = lock_token(cinfo);
+	if (set_bit)
+		clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
+	return rv;
 }
 
 static void unlock_comm(struct md_cluster_info *cinfo)
@@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
 {
 	int ret;
 
-	lock_comm(cinfo, mddev_locked);
-	ret = __sendmsg(cinfo, cmsg);
-	unlock_comm(cinfo);
+	ret = lock_comm(cinfo, mddev_locked);
+	if (!ret) {
+		ret = __sendmsg(cinfo, cmsg);
+		unlock_comm(cinfo);
+	}
 	return ret;
 }
 
@@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev)
 		return 0;
 	}
 
-	ret = lock_token(cinfo, 1);
+	ret = lock_token(cinfo);
 	clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
 	return ret;
 }
@@ -1255,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
 	int raid_slot = -1;
 
 	md_update_sb(mddev, 1);
-	lock_comm(cinfo, 1);
+	if (lock_comm(cinfo, 1)) {
+		pr_err("%s: lock_comm failed\n", __func__);
+		return;
+	}
 
 	memset(&cmsg, 0, sizeof(cmsg));
 	cmsg.type = cpu_to_le32(METADATA_UPDATED);
@@ -1407,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
 	cmsg.type = cpu_to_le32(NEWDISK);
 	memcpy(cmsg.uuid, uuid, 16);
 	cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
-	lock_comm(cinfo, 1);
+	if (lock_comm(cinfo, 1))
+		return -EAGAIN;
 	ret = __sendmsg(cinfo, &cmsg);
 	if (ret) {
 		unlock_comm(cinfo);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 55f9ddf6f44f..3be74cf3635f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6948,8 +6948,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
 		goto busy;
 
 kick_rdev:
-	if (mddev_is_clustered(mddev))
-		md_cluster_ops->remove_disk(mddev, rdev);
+	if (mddev_is_clustered(mddev)) {
+		if (md_cluster_ops->remove_disk(mddev, rdev))
+			goto busy;
+	}
 
 	md_kick_rdev_from_array(rdev);
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);