# Commit b6d9398144f21718d25daaf8d72669a75592abc5
# Date 2020-07-07 14:39:05 +0200
# Author Roger Pau Monné <roger.pau@citrix.com>
# Committer Jan Beulich <jbeulich@suse.com>
vtd: don't assume addresses are aligned in sync_cache
Current code in sync_cache assumes that the address passed in is
aligned to a cache line size. Fix the code to support passing in
arbitrary addresses not necessarily aligned to a cache line size.
This is part of XSA-321.
Reported-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -143,8 +143,8 @@ static int iommus_incoherent;
 
 static void sync_cache(const void *addr, unsigned int size)
 {
-    int i;
-    static unsigned int clflush_size = 0;
+    static unsigned long clflush_size = 0;
+    const void *end = addr + size;
 
     if ( !iommus_incoherent )
         return;
@@ -152,8 +152,9 @@ static void sync_cache(const void *addr,
     if ( clflush_size == 0 )
         clflush_size = get_cache_line_size();
 
-    for ( i = 0; i < size; i += clflush_size )
-        cacheline_flush((char *)addr + i);
+    addr -= (unsigned long)addr & (clflush_size - 1);
+    for ( ; addr < end; addr += clflush_size )
+        cacheline_flush((char *)addr);
 }
 
 /* Allocate page table, return its machine address */
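
For illustration only: a minimal standalone C sketch of the fixed loop,
outside the Xen tree. The printf() stub standing in for cacheline_flush(),
the assumed 64-byte line size and the main() harness are not part of the
patch; the real code queries the line size via get_cache_line_size() and
flushes with clflush.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for Xen's cacheline_flush(); on x86 the real helper issues a
 * clflush for the cache line containing the given address. */
static void cacheline_flush(const char *p)
{
    printf("flush cache line at %p\n", (const void *)p);
}

/* Sketch of the fixed logic: round the start address down to a cache line
 * boundary, then flush every line up to end.  Assumes a 64-byte line. */
static void sync_cache(const void *addr, unsigned int size)
{
    const unsigned long clflush_size = 64;
    const char *p = addr;
    const char *end = p + size;

    /* Align the start downwards so the first (partial) line is covered. */
    p -= (uintptr_t)p & (clflush_size - 1);

    for ( ; p < end; p += clflush_size )
        cacheline_flush(p);
}

int main(void)
{
    char buf[256];

    /* An address 3 bytes into the buffer is not line aligned.  The old
     * loop (i < size, stepping by the line size) could miss the last line
     * touched by such a range; after aligning the start down, every line
     * overlapping [addr, addr + size) is flushed. */
    sync_cache(buf + 3, 62);
    return 0;
}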