From c22f60e6e55f1bf300dd76d2222a93911f3b2bb2 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Thu, 12 Oct 2023 04:00:49 +0000
Subject: automatic import of xen

---
 ...6ce9-VT-d-sync_cache-misaligned-addresses.patch | 41 ++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 5f046ce9-VT-d-sync_cache-misaligned-addresses.patch

diff --git a/5f046ce9-VT-d-sync_cache-misaligned-addresses.patch b/5f046ce9-VT-d-sync_cache-misaligned-addresses.patch
new file mode 100644
index 0000000..9f710c6
--- /dev/null
+++ b/5f046ce9-VT-d-sync_cache-misaligned-addresses.patch
@@ -0,0 +1,41 @@
+# Commit b6d9398144f21718d25daaf8d72669a75592abc5
+# Date 2020-07-07 14:39:05 +0200
+# Author Roger Pau Monné
+# Committer Jan Beulich
+vtd: don't assume addresses are aligned in sync_cache
+
+Current code in sync_cache assumes that the address passed in is
+aligned to a cache line size. Fix the code to support passing in
+arbitrary addresses not necessarily aligned to a cache line size.
+
+This is part of XSA-321.
+
+Reported-by: Jan Beulich
+Signed-off-by: Roger Pau Monné
+Reviewed-by: Jan Beulich
+
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -143,8 +143,8 @@ static int iommus_incoherent;
+ 
+ static void sync_cache(const void *addr, unsigned int size)
+ {
+-    int i;
+-    static unsigned int clflush_size = 0;
++    static unsigned long clflush_size = 0;
++    const void *end = addr + size;
+ 
+     if ( !iommus_incoherent )
+         return;
+@@ -152,8 +152,9 @@ static void sync_cache(const void *addr,
+     if ( clflush_size == 0 )
+         clflush_size = get_cache_line_size();
+ 
+-    for ( i = 0; i < size; i += clflush_size )
+-        cacheline_flush((char *)addr + i);
++    addr -= (unsigned long)addr & (clflush_size - 1);
++    for ( ; addr < end; addr += clflush_size )
++        cacheline_flush((char *)addr);
+ }
+ 
+ /* Allocate page table, return its machine address */
-- 
cgit v1.2.3
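
For reference, a minimal standalone C sketch of the behaviour the fixed sync_cache() loop implements: round the start address down to its cache-line boundary, then flush every line overlapping [addr, addr + size). The names DEMO_CACHE_LINE, demo_clflush() and demo_sync_cache() are invented stand-ins for Xen's get_cache_line_size() and cacheline_flush(); the demo only prints the line addresses it would flush.

/*
 * Illustrative sketch only -- not part of the patch above. It mimics the
 * post-patch sync_cache() loop; the DEMO_* names are stand-ins, not Xen code.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_CACHE_LINE 64u   /* assumed cache line size for the demo */

/* Stand-in for cacheline_flush(): just report the line address. */
static void demo_clflush(const char *line)
{
    printf("flush cache line at %p\n", (void *)line);
}

static void demo_sync_cache(const void *addr, unsigned int size)
{
    const char *p = addr;
    const char *end = p + size;          /* one past the last byte */

    /* Round down to the start of the containing cache line. */
    p -= (uintptr_t)p & (DEMO_CACHE_LINE - 1);

    /* Flush every line that overlaps the requested range. */
    for ( ; p < end; p += DEMO_CACHE_LINE )
        demo_clflush(p);
}

int main(void)
{
    static _Alignas(64) char buf[256];

    /*
     * Unaligned 100-byte range starting 30 bytes into an aligned buffer:
     * it touches three 64-byte lines (offsets 0, 64 and 128). The pre-patch
     * loop stepped from the unaligned start address and stopped after two
     * iterations, leaving the last line unflushed.
     */
    demo_sync_cache(buf + 30, 100);
    return 0;
}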