summaryrefslogtreecommitdiff
path: root/5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
diff options
context:
space:
mode:
authorCoprDistGit <infra@openeuler.org>2023-10-12 04:00:49 +0000
committerCoprDistGit <infra@openeuler.org>2023-10-12 04:00:49 +0000
commitc22f60e6e55f1bf300dd76d2222a93911f3b2bb2 (patch)
treeef665e7018377f53612ac2751dcaea35a1c587b6 /5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
parent39a4763249cd6289e5019acfe0c98dbb169f5f2e (diff)
automatic import of xenopeneuler22.03_LTS
Diffstat (limited to '5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch')
-rw-r--r--5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch57
1 files changed, 57 insertions, 0 deletions
diff --git a/5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch b/5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
new file mode 100644
index 0000000..7d21a6f
--- /dev/null
+++ b/5edf6ad8-ioreq-pending-emulation-server-destruction-race.patch
@@ -0,0 +1,57 @@
+# Commit f7039ee41b3d3448775a1623f230037fd0455104
+# Date 2020-06-09 12:56:24 +0200
+# Author Paul Durrant <pdurrant@amazon.com>
+# Committer Jan Beulich <jbeulich@suse.com>
+ioreq: handle pending emulation racing with ioreq server destruction
+
+When an emulation request is initiated in hvm_send_ioreq() the guest vcpu is
+blocked on an event channel until that request is completed. If, however,
+the emulator is killed whilst that emulation is pending then the ioreq
+server may be destroyed. Thus when the vcpu is awoken the code in
+handle_hvm_io_completion() will find no pending request to wait for, but will
+leave the internal vcpu io_req.state set to IOREQ_READY and the vcpu shutdown
+deferral flag in place (because hvm_io_assist() will never be called). The
+emulation request is then completed anyway. This means that any subsequent call
+to hvmemul_do_io() will find an unexpected value in io_req.state and will
+return X86EMUL_UNHANDLEABLE, which in some cases will result in continuous
+re-tries.
+
+This patch fixes the issue by moving the setting of io_req.state and clearing
+of shutdown deferral (as well as MSI-X write completion) out of hvm_io_assist()
+and directly into handle_hvm_io_completion().
+
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: Paul Durrant <pdurrant@amazon.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/ioreq.c
++++ b/xen/arch/x86/hvm/ioreq.c
+@@ -107,15 +107,7 @@ static void hvm_io_assist(struct hvm_ior
+ ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;
+
+ if ( hvm_ioreq_needs_completion(ioreq) )
+- {
+- ioreq->state = STATE_IORESP_READY;
+ ioreq->data = data;
+- }
+- else
+- ioreq->state = STATE_IOREQ_NONE;
+-
+- msix_write_completion(v);
+- vcpu_end_shutdown_deferral(v);
+
+ sv->pending = false;
+ }
+@@ -207,6 +199,12 @@ bool handle_hvm_io_completion(struct vcp
+ }
+ }
+
++ vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
++ STATE_IORESP_READY : STATE_IOREQ_NONE;
++
++ msix_write_completion(v);
++ vcpu_end_shutdown_deferral(v);
++
+ io_completion = vio->io_completion;
+ vio->io_completion = HVMIO_no_completion;
+