system/xen/xsa/xsa328-post-xsa321-4.13-2.patch
From: <security@xenproject.org>
Subject: vtd: prune (and rename) cache flush functions

Rename __iommu_flush_cache to iommu_sync_cache and remove
iommu_flush_cache_page. Also remove the iommu_flush_cache_entry
wrapper and just use iommu_sync_cache instead. Note that the _entry
suffix was meaningless, as the wrapper already took a size parameter
in bytes. While there, also constify the addr parameter.
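
To illustrate, a minimal before/after sketch of the call shapes (drawn
from the hunks below; Xen-internal types and helpers assumed):

    /* Before: two wrappers around the same flush primitive, with
     * inconsistent units (bytes vs. pages). */
    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));   /* bytes */
    iommu_flush_cache_page(vaddr, 1);                       /* pages */

    /* After: a single function taking a const pointer and a byte count. */
    iommu_sync_cache(pte, sizeof(struct dma_pte));
    iommu_sync_cache(vaddr, PAGE_SIZE);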

No functional change intended.

This is part of XSA-321.

Reviewed-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -43,8 +43,7 @@ void disable_qinval(struct vtd_iommu *io
 int enable_intremap(struct vtd_iommu *iommu, int eim);
 void disable_intremap(struct vtd_iommu *iommu);
 
-void iommu_flush_cache_entry(void *addr, unsigned int size);
-void iommu_flush_cache_page(void *addr, unsigned long npages);
+void iommu_sync_cache(const void *addr, unsigned int size);
 int iommu_alloc(struct acpi_drhd_unit *drhd);
 void iommu_free(struct acpi_drhd_unit *drhd);
 
--- a/xen/drivers/passthrough/vtd/intremap.c
+++ b/xen/drivers/passthrough/vtd/intremap.c
@@ -230,7 +230,7 @@ static void free_remap_entry(struct vtd_
                      iremap_entries, iremap_entry);
 
     update_irte(iommu, iremap_entry, &new_ire, false);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -406,7 +406,7 @@ static int ioapic_rte_to_remap_entry(str
     }
 
     update_irte(iommu, iremap_entry, &new_ire, !init);
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
@@ -695,7 +695,7 @@ static int msi_msg_to_remap_entry(
     update_irte(iommu, iremap_entry, &new_ire, msi_desc->irte_initialized);
     msi_desc->irte_initialized = true;
 
-    iommu_flush_cache_entry(iremap_entry, sizeof(*iremap_entry));
+    iommu_sync_cache(iremap_entry, sizeof(*iremap_entry));
     iommu_flush_iec_index(iommu, 0, index);
 
     unmap_vtd_domain_page(iremap_entries);
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -140,7 +140,8 @@ static int context_get_domain_id(struct
 }
 
 static int iommus_incoherent;
-static void __iommu_flush_cache(void *addr, unsigned int size)
+
+void iommu_sync_cache(const void *addr, unsigned int size)
 {
     int i;
     static unsigned int clflush_size = 0;
@@ -155,16 +156,6 @@ static void __iommu_flush_cache(void *ad
         cacheline_flush((char *)addr + i);
 }
 
-void iommu_flush_cache_entry(void *addr, unsigned int size)
-{
-    __iommu_flush_cache(addr, size);
-}
-
-void iommu_flush_cache_page(void *addr, unsigned long npages)
-{
-    __iommu_flush_cache(addr, PAGE_SIZE * npages);
-}
-
 /* Allocate page table, return its machine address */
 uint64_t alloc_pgtable_maddr(unsigned long npages, nodeid_t node)
 {
@@ -183,7 +174,7 @@ uint64_t alloc_pgtable_maddr(unsigned lo
         vaddr = __map_domain_page(cur_pg);
         memset(vaddr, 0, PAGE_SIZE);
 
-        iommu_flush_cache_page(vaddr, 1);
+        iommu_sync_cache(vaddr, PAGE_SIZE);
         unmap_domain_page(vaddr);
         cur_pg++;
     }
@@ -216,7 +207,7 @@ static u64 bus_to_context_maddr(struct v
         }
         set_root_value(*root, maddr);
         set_root_present(*root);
-        iommu_flush_cache_entry(root, sizeof(struct root_entry));
+        iommu_sync_cache(root, sizeof(struct root_entry));
     }
     maddr = (u64) get_context_addr(*root);
     unmap_vtd_domain_page(root_entries);
@@ -263,7 +254,7 @@ static u64 addr_to_dma_page_maddr(struct
              */
             dma_set_pte_readable(*pte);
             dma_set_pte_writable(*pte);
-            iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+            iommu_sync_cache(pte, sizeof(struct dma_pte));
         }
 
         if ( level == 2 )
@@ -640,7 +631,7 @@ static int __must_check dma_pte_clear_on
     *flush_flags |= IOMMU_FLUSHF_modified;
 
     spin_unlock(&hd->arch.mapping_lock);
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     unmap_vtd_domain_page(page);
 
@@ -679,7 +670,7 @@ static void iommu_free_page_table(struct
             iommu_free_pagetable(dma_pte_addr(*pte), next_level);
 
         dma_clear_pte(*pte);
-        iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+        iommu_sync_cache(pte, sizeof(struct dma_pte));
     }
 
     unmap_vtd_domain_page(pt_vaddr);
@@ -1400,7 +1391,7 @@ int domain_context_mapping_one(
     context_set_address_width(*context, agaw);
     context_set_fault_enable(*context);
     context_set_present(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
     spin_unlock(&iommu->lock);
 
     /* Context entry was previously non-present (with domid 0). */
@@ -1564,7 +1555,7 @@ int domain_context_unmap_one(
 
     context_clear_present(*context);
     context_clear_entry(*context);
-    iommu_flush_cache_entry(context, sizeof(struct context_entry));
+    iommu_sync_cache(context, sizeof(struct context_entry));
 
     iommu_domid= domain_iommu_domid(domain, iommu);
     if ( iommu_domid == -1 )
@@ -1791,7 +1782,7 @@ static int __must_check intel_iommu_map_
 
     *pte = new;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
     spin_unlock(&hd->arch.mapping_lock);
     unmap_vtd_domain_page(page);
 
@@ -1866,7 +1857,7 @@ int iommu_pte_flush(struct domain *d, ui
     int iommu_domid;
     int rc = 0;
 
-    iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
+    iommu_sync_cache(pte, sizeof(struct dma_pte));
 
     for_each_drhd_unit ( drhd )
     {
@@ -2724,7 +2715,7 @@ static int __init intel_iommu_quarantine
             dma_set_pte_addr(*pte, maddr);
             dma_set_pte_readable(*pte);
         }
-        iommu_flush_cache_page(parent, 1);
+        iommu_sync_cache(parent, PAGE_SIZE);
 
         unmap_vtd_domain_page(parent);
         parent = map_vtd_domain_page(maddr);