Diffstat (limited to '0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch')
-rw-r--r-- | 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch | 108 |
1 file changed, 108 insertions, 0 deletions
diff --git a/0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch b/0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
new file mode 100644
index 0000000..eedac5e
--- /dev/null
+++ b/0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
@@ -0,0 +1,108 @@
+From 6e7d333625ecd9f7402c2e839338350fa86eaf45 Mon Sep 17 00:00:00 2001
+From: Hari Gowtham <hgowtham@redhat.com>
+Date: Tue, 16 Apr 2019 17:07:37 +0530
+Subject: [PATCH 110/124] tier/shd/glusterd: with shd mux, the shd volfile path
+ have to be updated for tier-heald.t
+
+The volfile path for glustershd has been changed to volume based
+from node based with the shd mux. And those changes for the
+tier-heald.t test case have been made in this patch.
+
+label: DOWNSTREAM ONLY
+
+Change-Id: I0137f7e02c2bf3721dd51c6dfb215cd81b31d6ef
+Signed-off-by: Hari Gowtham <hgowtham@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/168038
+Reviewed-by: Rafi Kavungal Chundattu Parambil <rkavunga@redhat.com>
+Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
+---
+ tests/basic/tier/tier-heald.t | 35 ++++++++++++++++++++---------------
+ 1 file changed, 20 insertions(+), 15 deletions(-)
+
+diff --git a/tests/basic/tier/tier-heald.t b/tests/basic/tier/tier-heald.t
+index a8e634f..0ec9e43 100644
+--- a/tests/basic/tier/tier-heald.t
++++ b/tests/basic/tier/tier-heald.t
+@@ -11,7 +11,7 @@ cleanup;
+ TEST glusterd
+ TEST pidof glusterd
+ 
+-volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
++r2_volfile=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
+ 
+ # Commands should fail when both tiers are not of distribute type.
+ # Glustershd shouldn't be running as long as there are no replicate/disperse
+@@ -34,51 +34,56 @@ TEST $CLI volume tier r2 attach $H0:$B0/r2_hot
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ TEST $CLI volume heal r2 enable
+ EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
+-EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
++EXPECT "enable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ TEST $CLI volume heal r2 disable
+ EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
+-EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
++EXPECT "disable" volgen_volume_option $r2_volfile r2-replicate-0 cluster replicate self-heal-daemon
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ # Commands should work on disperse volume.
+ TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
+ TEST $CLI volume start ec2
+ 
++ec2_volfile=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
++
+ TEST $CLI volume tier ec2 attach replica 2 $H0:$B0/ec2_hot{1..4}
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ TEST $CLI volume heal ec2 enable
+ EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
+-EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
++EXPECT "enable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ TEST $CLI volume heal ec2 disable
+ EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
+-EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
++EXPECT "disable" volgen_volume_option $ec2_volfile ec2-disperse-0 cluster disperse self-heal-daemon
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+ 
+ #Check that shd graph is rewritten correctly on volume stop/start
+-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
++EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
++EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+ TEST $CLI volume stop r2
+-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+-EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
++EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
++
++# Has been commented as the validations after stop using volfile dont hold true.
++#EXPECT "N" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+ TEST $CLI volume stop ec2
+ # When both the volumes are stopped glustershd volfile is not modified just the
+ # process is stopped
+ TEST "[ -z $(get_shd_process_pid) ]"
+ 
+ TEST $CLI volume start r2
+-EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
++# Has been commented as the validations after stop using volfile dont hold true.
++#EXPECT "N" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
++EXPECT "Y" volgen_volume_exists $r2_volfile r2-replicate-0 cluster replicate
+ 
+ TEST $CLI volume start ec2
+ 
+-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+-EXPECT "Y" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
++EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
++EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+ 
+ TEST $CLI volume tier ec2 detach force
+ 
+-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
+-EXPECT "N" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
++EXPECT "Y" volgen_volume_exists $ec2_volfile ec2-disperse-0 cluster disperse
++EXPECT "N" volgen_volume_exists $ec2_volfile ec2-replicate-0 cluster replicate
+ 
+ TEST $CLI volume set r2 self-heal-daemon on
+ TEST $CLI volume set r2 cluster.self-heal-daemon off
+-- 
+1.8.3.1
+
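For reference, the path change this test update tracks: with shd mux, glustershd volfiles are generated per volume rather than one per node, so the test keeps a separate volfile variable for each volume. Below is a minimal shell sketch of the two layouts as they appear in the patch; the shd_volfile helper is hypothetical, added here only for illustration, and the working directory is whatever `gluster system:: getwd` reports (typically /var/lib/glusterd):

    # Old node-based layout (pre shd-mux): a single volfile covered all volumes.
    old_volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"

    # New volume-based layout (shd mux): one shd volfile per volume, following
    # the pattern <workdir>/vols/<VOLNAME>/<VOLNAME>-shd.vol seen in the patch.
    shd_volfile() {
        # Hypothetical helper, not part of the patch: derive a volume's shd
        # volfile path from the glusterd working directory.
        local volname=$1
        echo "$(gluster system:: getwd)/vols/${volname}/${volname}-shd.vol"
    }

    r2_volfile=$(shd_volfile r2)    # e.g. /var/lib/glusterd/vols/r2/r2-shd.vol
    ec2_volfile=$(shd_volfile ec2)  # e.g. /var/lib/glusterd/vols/ec2/ec2-shd.vol

Because each volume now has its own volfile, validations that read a volume's volfile after that volume is stopped no longer hold, which is why the patch comments out the post-stop EXPECT "N" checks rather than retargeting them.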