Diffstat (limited to '0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch')
-rw-r--r--  0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch  141
1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch b/0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
new file mode 100644
index 0000000..0dc15ba
--- /dev/null
+++ b/0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
@@ -0,0 +1,141 @@
+From 998d9b8b5e271f407e1c654c34f45f0db36abc71 Mon Sep 17 00:00:00 2001
+From: Mohammed Rafi KC <rkavunga@redhat.com>
+Date: Tue, 21 May 2019 17:15:07 +0530
+Subject: [PATCH 172/178] ec/fini: Fix race with ec_fini and ec_notify
+
+During a graph cleanup, we first send a PARENT_DOWN event and wait for
+a CHILD_DOWN event before ultimately freeing the xlator and the graph.
+
+In the ec xlator, we clean up the threads when we get a PARENT_DOWN event.
+But a racing event like CHILD_UP or an xl_op event may trigger healing
+threads after the thread cleanup.
+
+So there is a chance that the threads might access a freed private
+variable.
+Upstream patch: https://review.gluster.org/#/c/glusterfs/+/22758/
+
+>Change-Id: I252d10181bb67b95900c903d479de707a8489532
+>fixes: bz#1703948
+>Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+
+Change-Id: I84a10352d9fb3e68d4147b3791e3af45ab79050e
+BUG: 1703434
+Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
+Reviewed-on: https://code.engineering.redhat.com/gerrit/172285
+Tested-by: RHGS Build Bot <nigelb@redhat.com>
+Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
+---
+ libglusterfs/src/glusterfs/xlator.h |  3 +++
+ libglusterfs/src/libglusterfs.sym   |  1 +
+ libglusterfs/src/xlator.c           | 21 +++++++++++++++++++++
+ xlators/cluster/ec/src/ec-heal.c    |  4 ++++
+ xlators/cluster/ec/src/ec-heald.c   |  6 ++++++
+ xlators/cluster/ec/src/ec.c         |  3 +++
+ 6 files changed, 38 insertions(+)
+
+diff --git a/libglusterfs/src/glusterfs/xlator.h b/libglusterfs/src/glusterfs/xlator.h
+index 8998976..09e463e 100644
+--- a/libglusterfs/src/glusterfs/xlator.h
++++ b/libglusterfs/src/glusterfs/xlator.h
+@@ -1092,4 +1092,7 @@ gluster_graph_take_reference(xlator_t *tree);
+
+ gf_boolean_t
+ mgmt_is_multiplexed_daemon(char *name);
++
++gf_boolean_t
++xlator_is_cleanup_starting(xlator_t *this);
+ #endif /* _XLATOR_H */
+diff --git a/libglusterfs/src/libglusterfs.sym b/libglusterfs/src/libglusterfs.sym
+index ec474e7..7a2edef 100644
+--- a/libglusterfs/src/libglusterfs.sym
++++ b/libglusterfs/src/libglusterfs.sym
+@@ -1161,3 +1161,4 @@ glusterfs_process_svc_attach_volfp
+ glusterfs_mux_volfile_reconfigure
+ glusterfs_process_svc_detach
+ mgmt_is_multiplexed_daemon
++xlator_is_cleanup_starting
+diff --git a/libglusterfs/src/xlator.c b/libglusterfs/src/xlator.c
+index 022c3ed..fbfbbe2 100644
+--- a/libglusterfs/src/xlator.c
++++ b/libglusterfs/src/xlator.c
+@@ -1486,3 +1486,24 @@ mgmt_is_multiplexed_daemon(char *name)
+     }
+     return _gf_false;
+ }
++
++gf_boolean_t
++xlator_is_cleanup_starting(xlator_t *this)
++{
++    gf_boolean_t cleanup = _gf_false;
++    glusterfs_graph_t *graph = NULL;
++    xlator_t *xl = NULL;
++
++    if (!this)
++        goto out;
++    graph = this->graph;
++
++    if (!graph)
++        goto out;
++
++    xl = graph->first;
++    if (xl && xl->cleanup_starting)
++        cleanup = _gf_true;
++out:
++    return cleanup;
++}
+diff --git a/xlators/cluster/ec/src/ec-heal.c b/xlators/cluster/ec/src/ec-heal.c
+index 2fa1f11..8844c29 100644
+--- a/xlators/cluster/ec/src/ec-heal.c
++++ b/xlators/cluster/ec/src/ec-heal.c
+@@ -2855,6 +2855,10 @@ ec_replace_brick_heal_wrap(void *opaque)
+         itable = ec->xl->itable;
+     else
+         goto out;
++
++    if (xlator_is_cleanup_starting(ec->xl))
++        goto out;
++
+     ret = ec_replace_heal(ec, itable->root);
+ out:
+     return ret;
+diff --git a/xlators/cluster/ec/src/ec-heald.c b/xlators/cluster/ec/src/ec-heald.c
+index edf5e11..91512d7 100644
+--- a/xlators/cluster/ec/src/ec-heald.c
++++ b/xlators/cluster/ec/src/ec-heald.c
+@@ -444,6 +444,9 @@ unlock:
+ int
+ ec_shd_full_healer_spawn(xlator_t *this, int subvol)
+ {
++    if (xlator_is_cleanup_starting(this))
++        return -1;
++
+     return ec_shd_healer_spawn(this, NTH_FULL_HEALER(this, subvol),
+                                ec_shd_full_healer);
+ }
+@@ -451,6 +454,9 @@ ec_shd_full_healer_spawn(xlator_t *this, int subvol)
+ int
+ ec_shd_index_healer_spawn(xlator_t *this, int subvol)
+ {
++    if (xlator_is_cleanup_starting(this))
++        return -1;
++
+     return ec_shd_healer_spawn(this, NTH_INDEX_HEALER(this, subvol),
+                                ec_shd_index_healer);
+ }
+diff --git a/xlators/cluster/ec/src/ec.c b/xlators/cluster/ec/src/ec.c
+index 264582a..df5912c 100644
+--- a/xlators/cluster/ec/src/ec.c
++++ b/xlators/cluster/ec/src/ec.c
+@@ -486,6 +486,9 @@ ec_set_up_state(ec_t *ec, uintptr_t index_mask, uintptr_t new_state)
+ {
+     uintptr_t current_state = 0;
++
++    if (xlator_is_cleanup_starting(ec->xl))
++        return _gf_false;
++
+     if ((ec->xl_notify & index_mask) == 0) {
+         ec->xl_notify |= index_mask;
+         ec->xl_notify_count++;
+--
+1.8.3.1
+
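For orientation, the guard this patch introduces boils down to: read a
cleanup flag on the graph's top xlator before doing any work that could
outlive the graph. Below is a minimal standalone C sketch of that pattern.
The struct definitions and the healer_spawn() helper are simplified
stand-ins invented for illustration, not the real glusterfs types; only
the flag walk in xlator_is_cleanup_starting() mirrors the patch.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for xlator_t and glusterfs_graph_t (illustrative,
 * not the real glusterfs headers). */
typedef struct xlator xlator_t;

typedef struct graph {
    xlator_t *first; /* top-of-graph xlator carrying the cleanup flag */
} glusterfs_graph_t;

struct xlator {
    glusterfs_graph_t *graph;
    int cleanup_starting; /* set once graph teardown has begun */
};

/* Mirrors the patch: walk to the graph and read cleanup_starting on its
 * first xlator; NULL anywhere along the way means "not cleaning up". */
static bool
xlator_is_cleanup_starting(xlator_t *this)
{
    if (!this || !this->graph)
        return false;
    xlator_t *top = this->graph->first;
    return top && top->cleanup_starting;
}

/* Hypothetical spawn path, shaped like ec_shd_*_healer_spawn(): refuse
 * to start a healer once teardown has begun, so a racing CHILD_UP or
 * xl_op event cannot revive threads that would touch freed private
 * data. */
static int
healer_spawn(xlator_t *this)
{
    if (xlator_is_cleanup_starting(this))
        return -1; /* graph is going away; do not spawn */
    printf("spawning healer thread\n");
    return 0;
}

int
main(void)
{
    glusterfs_graph_t graph = {0};
    xlator_t top = {.graph = &graph, .cleanup_starting = 0};
    xlator_t ec = {.graph = &graph, .cleanup_starting = 0};
    graph.first = &top;

    healer_spawn(&ec);        /* succeeds: cleanup has not started */
    top.cleanup_starting = 1; /* graph cleanup begins (PARENT_DOWN) */
    healer_spawn(&ec);        /* now refused: returns -1 */
    return 0;
}

In the real patch this check is repeated at each site that can create or
resume work, which is why the hunks above touch the replace-brick wrapper,
both self-heal daemon spawn paths, and ec_set_up_state().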