From c22f60e6e55f1bf300dd76d2222a93911f3b2bb2 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Thu, 12 Oct 2023 04:00:49 +0000
Subject: automatic import of xen

---
 5f046cb5-VT-d-prune-rename-cache-flush-funcs.patch | 179 +++++++++++++++++++++
 1 file changed, 179 insertions(+)
 create mode 100644 5f046cb5-VT-d-prune-rename-cache-flush-funcs.patch

diff --git a/5f046cb5-VT-d-prune-rename-cache-flush-funcs.patch b/5f046cb5-VT-d-prune-rename-cache-flush-funcs.patch
new file mode 100644
index 0000000..9b2e539
--- /dev/null
+++ b/5f046cb5-VT-d-prune-rename-cache-flush-funcs.patch
@@ -0,0 +1,179 @@
+# Commit 62298825b9a44f45761acbd758138b5ba059ebd1
+# Date 2020-07-07 14:38:13 +0200
+# Author Roger Pau Monné
+# Committer Jan Beulich
+vtd: prune (and rename) cache flush functions
+
+Rename __iommu_flush_cache to iommu_sync_cache and remove
+iommu_flush_cache_page. Also remove the iommu_flush_cache_entry
+wrapper and just use iommu_sync_cache instead. Note the _entry suffix
+was meaningless as the wrapper was already taking a size parameter in
+bytes. While there also constify the addr parameter.
+
+No functional change intended.
+
+This is part of XSA-321.
+
+Signed-off-by: Roger Pau Monné
+Reviewed-by: Jan Beulich
+
+--- a/xen/drivers/passthrough/vtd/extern.h
++++ b/xen/drivers/passthrough/vtd/extern.h
+@@ -43,8 +43,7 @@ void disable_qinval(struct vtd_iommu *io
+ int enable_intremap(struct vtd_iommu *iommu, int eim);
+ void disable_intremap(struct vtd_iommu *iommu);
+ 
+-void iommu_flush_cache_entry(void *addr, unsigned int size);
+-void iommu_flush_cache_page(void *addr, unsigned long npages);
++void iommu_sync_cache(const void *addr, unsigned int size);
+ int iommu_alloc(struct acpi_drhd_unit *drhd);
+ void iommu_free(struct acpi_drhd_unit *drhd);
+ 
+--- a/xen/drivers/passthrough/vtd/intremap.c
++++ b/xen/drivers/passthrough/vtd/intremap.c
+@@ -230,7 +230,7 @@ static void free_remap_entry(struct vtd_
+                      iremap_entries, iremap_entry);
+ 
+     update_irte(iommu, iremap_entry, &new_ire, false);
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+@@ -406,7 +406,7 @@ static int ioapic_rte_to_remap_entry(str
+     }
+ 
+     update_irte(iommu, iremap_entry, &new_ire, !init);
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+@@ -695,7 +695,7 @@ static int msi_msg_to_remap_entry(
+     update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
+     msi_desc->irte_initialized = true;
+ 
+-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
++    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
+     iommu_flush_iec_index(iommu, 0, index);
+ 
+     unmap_vtd_domain_page(iremap_entries);
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -140,7 +140,8 @@ static int context_get_domain_id(struct
+ }
+ 
+ static int iommus_incoherent;
+-static void __iommu_flush_cache(void *addr, unsigned int size)
++
++void iommu_sync_cache(const void *addr, unsigned int size)
+ {
+     int i;
+     static unsigned int clflush_size = 0;
+@@ -155,16 +156,6 @@ static void __iommu_flush_cache(void *ad
+         cacheline_flush((char *)addr + i);
+ }
+ 
+-void iommu_flush_cache_entry(void *addr, unsigned int size)
+-{
+-    __iommu_flush_cache(addr, size);
+-}
+-
+-void iommu_flush_cache_page(void *addr, unsigned long npages)
+-{
+-    __iommu_flush_cache(addr, PAGE_SIZE * npages);
+-}
+-
+ /* Allocate page table, return its machine address */
+ uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node)
+ {
+@@ -183,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
+         vaddr = __map_domain_page(cur_pg);
+         memset(vaddr, 0, PAGE_SIZE);
+ 
+-        iommu_flush_cache_page(vaddr, 1);
++        iommu_sync_cache(vaddr, PAGE_SIZE);
+         unmap_domain_page(vaddr);
+         cur_pg++;
+     }
+@@ -216,7 +207,7 @@ static u64 bus_to_context_maddr(struct v
+         }
+         set_root_value(*root, maddr);
+         set_root_present(*root);
+-        iommu_flush_cache_entry(root, sizeof(struct root_entry));
++        iommu_sync_cache(root, sizeof(struct root_entry));
+     }
+     maddr = (u64) get_context_addr(*root);
+     unmap_vtd_domain_page(root_entries);
+@@ -263,7 +254,7 @@ static u64 addr_to_dma_page_maddr(struct
+          */
+         dma_set_pte_readable(*pte);
+         dma_set_pte_writable(*pte);
+-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++        iommu_sync_cache(pte, sizeof(struct dma_pte));
+     }
+ 
+     if ( level == 2 )
+@@ -640,7 +631,7 @@ static int __must_check dma_pte_clear_on
+     *flush_flags |= IOMMU_FLUSHF_modified;
+ 
+     spin_unlock(&hd->arch.mapping_lock);
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+ 
+     unmap_vtd_domain_page(page);
+ 
+@@ -679,7 +670,7 @@ static void iommu_free_page_table(struct
+             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
+ 
+         dma_clear_pte(*pte);
+-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++        iommu_sync_cache(pte, sizeof(struct dma_pte));
+     }
+ 
+     unmap_vtd_domain_page(pt_vaddr);
+@@ -1400,7 +1391,7 @@ int domain_context_mapping_one(
+     context_set_address_width(*context, agaw);
+     context_set_fault_enable(*context);
+     context_set_present(*context);
+-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
++    iommu_sync_cache(context, sizeof(struct context_entry));
+     spin_unlock(&iommu->lock);
+ 
+     /* Context entry was previously non-present (with domid 0). */
+@@ -1564,7 +1555,7 @@ int domain_context_unmap_one(
+ 
+     context_clear_present(*context);
+     context_clear_entry(*context);
+-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
++    iommu_sync_cache(context, sizeof(struct context_entry));
+ 
+     iommu_domid= domain_iommu_domid(domain, iommu);
+     if ( iommu_domid == -1 )
+@@ -1791,7 +1782,7 @@ static int __must_check intel_iommu_map_
+ 
+     *pte = new;
+ 
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+     spin_unlock(&hd->arch.mapping_lock);
+     unmap_vtd_domain_page(page);
+ 
+@@ -1866,7 +1857,7 @@ int iommu_pte_flush(struct domain *d, ui
+     int iommu_domid;
+     int rc = 0;
+ 
+-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
++    iommu_sync_cache(pte, sizeof(struct dma_pte));
+ 
+     for_each_drhd_unit ( drhd )
+     {
+@@ -2724,7 +2715,7 @@ static int __init intel_iommu_quarantine
+             dma_set_pte_addr(*pte, maddr);
+             dma_set_pte_readable(*pte);
+         }
+-        iommu_flush_cache_page(parent, 1);
++        iommu_sync_cache(parent, PAGE_SIZE);
+ 
+         unmap_vtd_domain_page(parent);
+         parent = map_vtd_domain_page(maddr);
-- 
cgit v1.2.3
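
For reference, the unified helper after this patch has roughly the shape
below, reconstructed from the context lines visible in the hunks above. The
iommus_incoherent early return and the get_cache_line_size() initialisation
are only partially visible in the diff, so treat this as a sketch of the
surrounding Xen code rather than the verbatim result:

    /*
     * Flush a byte range from the CPU caches so that a non-coherent IOMMU
     * observes the update; a no-op when every IOMMU snoops coherently.
     */
    void iommu_sync_cache(const void *addr, unsigned int size)
    {
        int i;
        static unsigned int clflush_size = 0;

        if ( !iommus_incoherent )
            return;

        if ( clflush_size == 0 )
            clflush_size = get_cache_line_size();

        /* Flush every cache line overlapping [addr, addr + size). */
        for ( i = 0; i < size; i += clflush_size )
            cacheline_flush((char *)addr + i);
    }

Callers now always pass an explicit size in bytes: what used to be
iommu_flush_cache_page(vaddr, 1) becomes iommu_sync_cache(vaddr, PAGE_SIZE),
and iommu_flush_cache_entry(pte, sizeof(struct dma_pte)) becomes
iommu_sync_cache(pte, sizeof(struct dma_pte)).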