path: root/xsa286-5.patch
author    CoprDistGit <infra@openeuler.org> 2023-10-12 04:00:49 +0000
committer CoprDistGit <infra@openeuler.org> 2023-10-12 04:00:49 +0000
commit    c22f60e6e55f1bf300dd76d2222a93911f3b2bb2 (patch)
tree      ef665e7018377f53612ac2751dcaea35a1c587b6 /xsa286-5.patch
parent    39a4763249cd6289e5019acfe0c98dbb169f5f2e (diff)
automatic import of xen (openeuler22.03_LTS)
Diffstat (limited to 'xsa286-5.patch')
-rw-r--r--  xsa286-5.patch  94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/xsa286-5.patch b/xsa286-5.patch
new file mode 100644
index 0000000..01fa2c1
--- /dev/null
+++ b/xsa286-5.patch
@@ -0,0 +1,94 @@
+x86/mm: avoid using top level linear page tables in {,un}map_domain_page()
+
+Move the page table recursion two levels down, from the root of the
+page tables to the mapcache's own L2 table. This entails not freeing
+the recursive mapping prematurely in free_perdomain_mappings().
+
+This is part of XSA-286.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: George Dunlap <george.dunlap@citrix.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
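
For context on the technique: a "recursive" (linear) page table entry is
one that points back at part of the paging structure itself, which makes
page-table contents readable through ordinary virtual addresses. Before
this patch, MAPCACHE_L1ENT() went through __linear_l1_table, i.e. through
a recursive slot in the top-level (L4) table, and a single L4 recursion
exposes every lower-level table in the address space. Moving the
recursion into the mapcache's own L2 table shrinks the exposed window to
just the mapcache's L1 tables. A toy calculation of the two window sizes
(plain C, not Xen code):

    #include <stdio.h>

    int main(void)
    {
        /* 4-level x86-64 paging: 12-bit page offset, 9 bits per level. */
        unsigned long l4_window = 1UL << (12 + 9 + 9 + 9); /* one L4 slot: 512GiB */
        unsigned long l2_window = 1UL << (12 + 9);         /* one L2 slot: 2MiB */

        /* Recursion at L4 makes all page tables readable through a
         * 512GiB window; recursion at L2 exposes only the 512 L1
         * tables below that one L2 table -- a 2MiB window. */
        printf("L4 recursion window: %lu bytes\n", l4_window);
        printf("L2 recursion window: %lu bytes\n", l2_window);
        return 0;
    }
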
+--- a/xen/arch/x86/domain_page.c
++++ b/xen/arch/x86/domain_page.c
+@@ -65,7 +65,8 @@ void __init mapcache_override_current(st
+ #define mapcache_l2_entry(e) ((e) >> PAGETABLE_ORDER)
+ #define MAPCACHE_L2_ENTRIES (mapcache_l2_entry(MAPCACHE_ENTRIES - 1) + 1)
+ #define MAPCACHE_L1ENT(idx) \
+- __linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
++ ((l1_pgentry_t *)(MAPCACHE_VIRT_START | \
++ ((L2_PAGETABLE_ENTRIES - 1) << L2_PAGETABLE_SHIFT)))[idx]
+
+ void *map_domain_page(mfn_t mfn)
+ {
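
The hunk above replaces the __linear_l1_table lookup with direct pointer
arithmetic into the window created by the recursive slot: the last L2
entry (slot L2_PAGETABLE_ENTRIES - 1) will map the L2 table itself, so
the L1 entries appear as a flat array at a fixed virtual address. A
minimal standalone sketch of the same arithmetic, with a hypothetical
base address standing in for Xen's MAPCACHE_VIRT_START:

    #include <stdint.h>

    #define PAGE_SHIFT           12
    #define PAGETABLE_ORDER      9
    #define L2_PAGETABLE_SHIFT   (PAGE_SHIFT + PAGETABLE_ORDER)  /* 21 */
    #define L2_PAGETABLE_ENTRIES (1UL << PAGETABLE_ORDER)        /* 512 */

    /* Illustrative only; any L3-aligned (1GiB-aligned) base works. */
    #define MAPCACHE_VIRT_START  0xffff820000000000UL

    /* With L2 slot 511 mapping the L2 table itself, the hardware walk
     * for addresses in that slot's 2MiB range lands on the L1 tables,
     * so the L1 entry for mapcache slot idx has a fixed address. */
    static uint64_t *mapcache_l1ent(unsigned long idx)
    {
        uint64_t *window = (uint64_t *)(MAPCACHE_VIRT_START |
            ((L2_PAGETABLE_ENTRIES - 1) << L2_PAGETABLE_SHIFT));
        return &window[idx];
    }
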
+@@ -235,6 +236,7 @@ int mapcache_domain_init(struct domain *
+ {
+ struct mapcache_domain *dcache = &d->arch.pv.mapcache;
+ unsigned int bitmap_pages;
++ int rc;
+
+ ASSERT(is_pv_domain(d));
+
+@@ -243,8 +245,10 @@ int mapcache_domain_init(struct domain *
+ return 0;
+ #endif
+
++ BUILD_BUG_ON(MAPCACHE_VIRT_START & ((1 << L3_PAGETABLE_SHIFT) - 1));
+ BUILD_BUG_ON(MAPCACHE_VIRT_END + PAGE_SIZE * (3 +
+- 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) >
++ 2 * PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long))) +
++ (1U << L2_PAGETABLE_SHIFT) >
+ MAPCACHE_VIRT_START + (PERDOMAIN_SLOT_MBYTES << 20));
+ bitmap_pages = PFN_UP(BITS_TO_LONGS(MAPCACHE_ENTRIES) * sizeof(long));
+ dcache->inuse = (void *)MAPCACHE_VIRT_END + PAGE_SIZE;
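
Two compile-time checks back the new layout. The added BUILD_BUG_ON on
MAPCACHE_VIRT_START demands 1GiB (L3) alignment, so the whole mapcache
sits under L2 tables reachable from a single L3 slot and the last L2
slot is free to take for the recursion; the widened size check reserves
an extra 2MiB (1U << L2_PAGETABLE_SHIFT) because that slot no longer
maps mapcache data. The alignment check in portable C11 form, with the
base address again hypothetical:

    #include <assert.h>

    #define L3_PAGETABLE_SHIFT  30
    #define MAPCACHE_VIRT_START 0xffff820000000000UL  /* illustrative */

    /* Equivalent of the patch's BUILD_BUG_ON: fail the build unless
     * the mapcache region starts on an L3 (1GiB) boundary. */
    static_assert((MAPCACHE_VIRT_START &
                   ((1UL << L3_PAGETABLE_SHIFT) - 1)) == 0,
                  "mapcache region must be 1GiB-aligned");
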
+@@ -253,9 +257,25 @@ int mapcache_domain_init(struct domain *
+
+ spin_lock_init(&dcache->lock);
+
+- return create_perdomain_mapping(d, (unsigned long)dcache->inuse,
+- 2 * bitmap_pages + 1,
+- NIL(l1_pgentry_t *), NULL);
++ rc = create_perdomain_mapping(d, (unsigned long)dcache->inuse,
++ 2 * bitmap_pages + 1,
++ NIL(l1_pgentry_t *), NULL);
++ if ( !rc )
++ {
++ /*
++ * Install mapping of our L2 table into its own last slot, for easy
++ * access to the L1 entries via MAPCACHE_L1ENT().
++ */
++ l3_pgentry_t *l3t = __map_domain_page(d->arch.perdomain_l3_pg);
++ l3_pgentry_t l3e = l3t[l3_table_offset(MAPCACHE_VIRT_END)];
++ l2_pgentry_t *l2t = map_l2t_from_l3e(l3e);
++
++ l2e_get_intpte(l2t[L2_PAGETABLE_ENTRIES - 1]) = l3e_get_intpte(l3e);
++ unmap_domain_page(l2t);
++ unmap_domain_page(l3t);
++ }
++
++ return rc;
+ }
+
+ int mapcache_vcpu_init(struct vcpu *v)
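
The setup code above installs the recursion once the per-domain mappings
exist: it maps the domain's L3 page, fetches the L3 entry that points at
the mapcache L2 table, and writes that entry's raw value into the L2
table's own last slot. Copying the raw PTE works because L3 and L2
entries share the MFN-plus-flags layout here. A self-contained sketch of
that step (the types and slot constant are illustrative, not Xen's):

    #include <stdint.h>

    typedef struct { uint64_t pte; } l2e_t;  /* raw 64-bit entries */
    typedef struct { uint64_t pte; } l3e_t;

    #define L2_PAGETABLE_ENTRIES 512

    /* l3e is the L3 entry mapping this very L2 table; reusing its raw
     * value as the last L2 entry makes the L2 table map itself. */
    static void install_recursive_slot(l2e_t *l2t, l3e_t l3e)
    {
        l2t[L2_PAGETABLE_ENTRIES - 1].pte = l3e.pte;
    }
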
+@@ -346,7 +366,7 @@ mfn_t domain_page_map_to_mfn(const void
+ else
+ {
+ ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
+- pl1e = &__linear_l1_table[l1_linear_offset(va)];
++ pl1e = &MAPCACHE_L1ENT(PFN_DOWN(va - MAPCACHE_VIRT_START));
+ }
+
+ return l1e_get_mfn(*pl1e);
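
The reverse lookup changes to match: instead of indexing the global
linear L1 table by raw virtual address, domain_page_map_to_mfn() now
derives the mapcache slot from the offset into the region and reads its
L1 entry through the recursive window. The index computation spelled out
(PFN_DOWN(x) is x >> PAGE_SHIFT):

    #include <stdint.h>

    #define PAGE_SHIFT          12
    #define MAPCACHE_VIRT_START 0xffff820000000000UL  /* illustrative */

    /* Slot index for a mapped address, as fed to MAPCACHE_L1ENT() in
     * the hunk above; pairs with mapcache_l1ent() sketched earlier. */
    static unsigned long mapcache_idx(uintptr_t va)
    {
        return (va - MAPCACHE_VIRT_START) >> PAGE_SHIFT;
    }
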
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -6024,6 +6024,10 @@ void free_perdomain_mappings(struct doma
+ {
+ struct page_info *l1pg = l2e_get_page(l2tab[j]);
+
++ /* mapcache_domain_init() installs a recursive entry. */
++ if ( l1pg == l2pg )
++ continue;
++
+ if ( l2e_get_flags(l2tab[j]) & _PAGE_AVAIL0 )
+ {
+ l1_pgentry_t *l1tab = __map_domain_page(l1pg);
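
Finally, teardown must honor the alias: the recursive slot's target page
is the L2 table itself, so free_perdomain_mappings() has to skip it, or
that page would be freed here as an "L1 table" and again when the L2
page itself is released. A sketch of the hazard being guarded against,
with hypothetical helpers standing in for Xen's page accessors:

    #include <stdint.h>

    struct page_info;                             /* opaque, as in Xen */
    struct page_info *l2e_to_page(uint64_t l2e);  /* hypothetical */
    void free_table_page(struct page_info *pg);   /* hypothetical */

    static void free_l1_tables(uint64_t *l2t, struct page_info *l2pg,
                               unsigned int nents)
    {
        for ( unsigned int j = 0; j < nents; j++ )
        {
            struct page_info *l1pg = l2e_to_page(l2t[j]);

            /* The recursive entry aliases the L2 table being walked;
             * freeing it here would double-free once the caller
             * releases l2pg itself. */
            if ( l1pg == l2pg )
                continue;

            if ( l1pg )
                free_table_page(l1pg);
        }
    }
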