This is to make the difference from FLUSH_CACHE_WRITEBACK more explicit.
Requested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
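[Note, not part of the patch: a minimal illustrative sketch of the semantics the two names now distinguish, mirroring the flush_area_local() hunk below. FLUSH_CACHE_EVICT writes dirty lines back and invalidates them; FLUSH_CACHE_WRITEBACK only writes them back.]

    /* Illustrative only, following the logic changed below. */
    if ( flags & FLUSH_CACHE_EVICT )
        wbinvd();      /* write back and invalidate cache lines */
    else if ( flags & FLUSH_CACHE_WRITEBACK )
        wbnoinvd();    /* write back, but keep lines cached */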
if ( flags & FLUSH_HVM_ASID_CORE )
hvm_flush_guest_tlbs();
- if ( flags & (FLUSH_CACHE | FLUSH_CACHE_WRITEBACK) )
+ if ( flags & (FLUSH_CACHE_EVICT | FLUSH_CACHE_WRITEBACK) )
{
const struct cpuinfo_x86 *c = &current_cpu_data;
unsigned long sz = 0;
c->x86_clflush_size && c->x86_cache_size && sz &&
((sz >> 10) < c->x86_cache_size) )
{
- if ( flags & FLUSH_CACHE )
+ if ( flags & FLUSH_CACHE_EVICT )
cache_flush(va, sz);
else
cache_writeback(va, sz);
- flags &= ~(FLUSH_CACHE | FLUSH_CACHE_WRITEBACK);
+ flags &= ~(FLUSH_CACHE_EVICT | FLUSH_CACHE_WRITEBACK);
}
- else if ( flags & FLUSH_CACHE )
+ else if ( flags & FLUSH_CACHE_EVICT )
wbinvd();
else
wbnoinvd();
domain_pause_nosync(v->domain);
/* Flush physical caches. */
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
hvm_set_uc_mode(v, 1);
domain_unpause(v->domain);
break;
/* fall through */
default:
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
break;
}
return 0;
p2m_memory_type_changed(d);
if ( type != X86_MT_WB )
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
return rc;
}
(!boot_cpu_has(X86_FEATURE_XEN_SELFSNOOP) ||
(is_iommu_enabled(d) && !iommu_snoop)) )
{
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
}
}
static void cf_check svm_wbinvd_intercept(void)
{
if ( cache_flush_permitted(current->domain) )
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
}
static void svm_vmexit_do_invalidate_cache(struct cpu_user_regs *regs,
{
int cpu = v->arch.hvm.vmx.active_cpu;
if ( cpu != -1 )
- flush_mask(cpumask_of(cpu), FLUSH_CACHE);
+ flush_mask(cpumask_of(cpu), FLUSH_CACHE_EVICT);
}
vmx_clear_vmcs(v);
return;
if ( cpu_has_wbinvd_exiting )
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
else
wbinvd();
}
/* Flush TLBs (or parts thereof) including global mappings */
#define FLUSH_TLB_GLOBAL 0x200
/* Flush data caches */
-#define FLUSH_CACHE 0x400
+#define FLUSH_CACHE_EVICT 0x400
/* VA for the flush has a valid mapping */
#define FLUSH_VA_VALID 0x800
/* Flush CPU state */
if ( !cpumask_intersects(mask,
per_cpu(cpu_sibling_mask, cpu)) )
__cpumask_set_cpu(cpu, mask);
- flush_mask(mask, FLUSH_CACHE);
+ flush_mask(mask, FLUSH_CACHE_EVICT);
}
else
rc = -EACCES;
if ( (flags & _PAGE_PRESENT) && \
(((o_) ^ flags) & PAGE_CACHE_ATTRS) ) \
{ \
- flush_flags |= FLUSH_CACHE; \
+ flush_flags |= FLUSH_CACHE_EVICT; \
if ( virt >= DIRECTMAP_VIRT_START && \
virt < HYPERVISOR_VIRT_END ) \
flush_flags |= FLUSH_VA_VALID; \
else if ( op == x86emul_wbnoinvd /* && cpu_has_wbnoinvd */ )
flush_all(FLUSH_CACHE_WRITEBACK);
else
- flush_all(FLUSH_CACHE);
+ flush_all(FLUSH_CACHE_EVICT);
return X86EMUL_OKAY;
}