author    CoprDistGit <infra@openeuler.org>    2023-10-12 04:00:49 +0000
committer CoprDistGit <infra@openeuler.org>    2023-10-12 04:00:49 +0000
commit    c22f60e6e55f1bf300dd76d2222a93911f3b2bb2 (patch)
tree      ef665e7018377f53612ac2751dcaea35a1c587b6 /xsa340.patch
parent    39a4763249cd6289e5019acfe0c98dbb169f5f2e (diff)
automatic import of xen (openeuler22.03_LTS)
Diffstat (limited to 'xsa340.patch')
-rw-r--r--  xsa340.patch  62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/xsa340.patch b/xsa340.patch
new file mode 100644
index 0000000..80aa95a
--- /dev/null
+++ b/xsa340.patch
@@ -0,0 +1,62 @@
+xen/evtchn: Add missing barriers when accessing/allocating an event channel
+
+While the allocation of a bucket is always performed with the
+per-domain lock held, the bucket may be accessed without the lock
+taken (for instance, see evtchn_send()).
+
+Instead, such sites rely on port_is_valid() to return a non-zero value
+when the port has a struct evtchn associated with it. The function
+mostly checks whether the port is less than d->valid_evtchns, as all
+the buckets/event channels should have been allocated up to that point.
+
+Unfortunately, a compiler is free to re-order the assignments in
+evtchn_allocate_port(), so it would be possible for d->valid_evtchns
+to be updated before the new bucket has been fully allocated.
+
+Additionally, on Arm, even if the code were compiled "correctly", the
+processor can still re-order the memory accesses.
+
+Add a write memory barrier on the allocation side and a read memory
+barrier when the port is valid, to prevent any re-ordering issues.
+
+This is XSA-340.
+
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+
+--- a/xen/common/event_channel.c
++++ b/xen/common/event_channel.c
+@@ -178,6 +178,13 @@ int evtchn_allocate_port(struct domain *
+             return -ENOMEM;
+         bucket_from_port(d, port) = chn;
+
++        /*
++         * d->valid_evtchns is used to check whether the bucket can be
++         * accessed without the per-domain lock. Therefore,
++         * d->valid_evtchns should be seen *after* the new bucket has
++         * been setup.
++         */
++        smp_wmb();
+         write_atomic(&d->valid_evtchns, d->valid_evtchns + EVTCHNS_PER_BUCKET);
+     }
+
+--- a/xen/include/xen/event.h
++++ b/xen/include/xen/event.h
+@@ -107,7 +107,17 @@ void notify_via_xen_event_channel(struct
+
+ static inline bool_t port_is_valid(struct domain *d, unsigned int p)
+ {
+-    return p < read_atomic(&d->valid_evtchns);
++    if ( p >= read_atomic(&d->valid_evtchns) )
++        return false;
++
++    /*
++     * The caller will usually access the event channel afterwards and
++     * may be done without taking the per-domain lock. The barrier
++     * pairs with the smp_wmb() barrier in evtchn_allocate_port().
++     */
++    smp_rmb();
++
++    return true;
+ }
+
+ static inline struct evtchn *evtchn_from_port(struct domain *d, unsigned int p)
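
To illustrate the publish/consume idiom the patch relies on, here is a
minimal standalone sketch. It uses C11 fences (atomic_thread_fence) as
portable stand-ins for Xen's smp_wmb()/smp_rmb(), and every name in it
(buckets, valid_slots, allocate_bucket, slot_is_valid) is hypothetical,
mirroring the shape of evtchn_allocate_port() and port_is_valid() rather
than quoting Xen code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define SLOTS_PER_BUCKET 64

    struct bucket {
        int slots[SLOTS_PER_BUCKET];
    };

    static struct bucket *buckets[16];
    static atomic_uint valid_slots;   /* plays the role of d->valid_evtchns */

    /* Writer side, called with the allocation lock held
     * (cf. evtchn_allocate_port()). */
    static int allocate_bucket(unsigned int idx)
    {
        struct bucket *b = calloc(1, sizeof(*b));

        if ( !b )
            return -1;
        buckets[idx] = b;

        /*
         * Publish the bucket before the updated count: the release fence
         * orders the stores to buckets[] before the store to valid_slots,
         * just as the smp_wmb() in the patch does.
         */
        atomic_thread_fence(memory_order_release);
        atomic_store_explicit(&valid_slots,
                              atomic_load_explicit(&valid_slots,
                                                   memory_order_relaxed)
                                  + SLOTS_PER_BUCKET,
                              memory_order_relaxed);
        return 0;
    }

    /* Lockless reader side (cf. port_is_valid()). */
    static bool slot_is_valid(unsigned int p)
    {
        if ( p >= atomic_load_explicit(&valid_slots, memory_order_relaxed) )
            return false;

        /*
         * Pairs with the release fence in allocate_bucket(), as smp_rmb()
         * pairs with smp_wmb(): reads of the bucket issued after this
         * point cannot be reordered before the check of valid_slots.
         */
        atomic_thread_fence(memory_order_acquire);
        return true;
    }

    int main(void)
    {
        /* Single-threaded demo of the API; the fences only matter when
         * allocate_bucket() and slot_is_valid() race on different CPUs. */
        if ( allocate_bucket(0) == 0 && slot_is_valid(3) )
            return 0;   /* slot 3 is safely reachable via buckets[0] */
        return 1;
    }

On x86 both smp_wmb() and smp_rmb() reduce to compiler-only barriers,
since the architecture reorders neither write-write nor read-read
accesses; on Arm they emit real dmb instructions, which is why the
commit message calls out Arm explicitly.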