Diffstat (limited to '5f046c9a-VT-d-improve-IOMMU-TLB-flush.patch')
-rw-r--r--  5f046c9a-VT-d-improve-IOMMU-TLB-flush.patch | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/5f046c9a-VT-d-improve-IOMMU-TLB-flush.patch b/5f046c9a-VT-d-improve-IOMMU-TLB-flush.patch
new file mode 100644
index 0000000..e87f09c
--- /dev/null
+++ b/5f046c9a-VT-d-improve-IOMMU-TLB-flush.patch
@@ -0,0 +1,35 @@
+# Commit 5fe515a0fede07543f2a3b049167b1fd8b873caf
+# Date 2020-07-07 14:37:46 +0200
+# Author Jan Beulich <jbeulich@suse.com>
+# Committer Jan Beulich <jbeulich@suse.com>
+vtd: improve IOMMU TLB flush
+
+Do not limit PSI flushes to order 0 pages, in order to avoid doing a
+full TLB flush if the passed in page has an order greater than 0 and
+is aligned. Should increase the performance of IOMMU TLB flushes when
+dealing with page orders greater than 0.
+
+This is part of XSA-321.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -570,13 +570,14 @@ static int __must_check iommu_flush_iotl
+ if ( iommu_domid == -1 )
+ continue;
+
+- if ( page_count != 1 || dfn_eq(dfn, INVALID_DFN) )
++ if ( !page_count || (page_count & (page_count - 1)) ||
++ dfn_eq(dfn, INVALID_DFN) || !IS_ALIGNED(dfn_x(dfn), page_count) )
+ rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+ 0, flush_dev_iotlb);
+ else
+ rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+ dfn_to_daddr(dfn),
+- PAGE_ORDER_4K,
++ get_order_from_pages(page_count),
+ !dma_old_pte_present,
+ flush_dev_iotlb);
+
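
To make the new condition concrete, here is a standalone sketch (plain C, not Xen code) of the decision the hunk above adds to the VT-d IOTLB flush path: a page-selective invalidation (PSI) is only attempted when page_count is a non-zero power of two and the DFN is aligned to that count, with the flush order derived from the page count in the way get_order_from_pages() does; anything else falls back to the wider domain-selective flush. The helpers is_pow2(), order_from_pages() and can_use_psi() below are illustrative stand-ins, not Xen APIs.

#include <stdbool.h>
#include <stdio.h>

/* True for a non-zero power of two. */
static bool is_pow2(unsigned long x)
{
    return x && !(x & (x - 1));
}

/*
 * Smallest order such that (1 << order) covers 'count' pages; for the
 * power-of-two counts accepted below this matches what Xen's
 * get_order_from_pages() would return.
 */
static unsigned int order_from_pages(unsigned long count)
{
    unsigned int order = 0;

    while ( (1UL << order) < count )
        ++order;

    return order;
}

/*
 * A PSI flush of 'count' pages starting at frame 'dfn' is only valid when
 * the count is a power of two and the start frame is aligned to it.
 */
static bool can_use_psi(unsigned long dfn, unsigned long count)
{
    return is_pow2(count) && !(dfn & (count - 1));
}

int main(void)
{
    /* Aligned order-2 request (4 pages at frame 0x1000): PSI of order 2. */
    if ( can_use_psi(0x1000, 4) )
        printf("PSI flush, order %u\n", order_from_pages(4));

    /* Unaligned start frame: fall back to a domain-selective flush. */
    if ( !can_use_psi(0x1001, 4) )
        printf("fall back to domain-selective flush\n");

    return 0;
}

With that check in place, an aligned request of, say, 4 pages can be handled by a single PSI of order 2 rather than forcing a flush of the whole domain, which is the performance gain the commit message describes.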