x86/cpu: KVM: Add common defines for architectural memory types (PAT, MTRRs, etc.)

Add defines for the architectural memory types that can be shoved into
various MSRs and registers, e.g. MTRRs, PAT, VMX capabilities MSRs, EPTPs,
etc.  While most MSRs/registers support only a subset of all memory types,
the values themselves are architectural and identical across all users.

Leave the goofy MTRR_TYPE_* definitions as-is since they are in a uapi
header, but add compile-time assertions to connect the dots (and sanity
check that the msr-index.h values didn't get fat-fingered).

Keep the VMX_EPTP_MT_* defines so that it's slightly more obvious that the
EPTP holds a single memory type in 3 of its 64 bits; those bits just
happen to be 2:0, i.e. don't need to be shifted.

Opportunistically use X86_MEMTYPE_WB instead of an open coded '6' in
setup_vmcs_config().

No functional change intended.

Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20240605231918.2915961-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Sean Christopherson 2024-06-05 16:19:09 -07:00
parent 47ac09b91b
commit e7e80b66fb
6 changed files with 37 additions and 26 deletions

arch/x86/include/asm/msr-index.h

@@ -36,6 +36,20 @@
 #define EFER_FFXSR (1<<_EFER_FFXSR)
 #define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC 1ull /* Write Combining */
+/* RESERVED 2 */
+/* RESERVED 3 */
+#define X86_MEMTYPE_WT 4ull /* Write Through */
+#define X86_MEMTYPE_WP 5ull /* Write Protected */
+#define X86_MEMTYPE_WB 6ull /* Write Back */
+#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheabled (PAT only) */
 /* FRED MSRs */
 #define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
 #define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@@ -1163,7 +1177,6 @@
 #define VMX_BASIC_64 0x0001000000000000LLU
 #define VMX_BASIC_MEM_TYPE_SHIFT 50
 #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
 #define VMX_BASIC_INOUT 0x0040000000000000LLU
 /* Resctrl MSRs: */

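The payoff of a single set of defines is that the encoding is identical no matter which register the value lands in; only the field position differs. A minimal standalone sketch of the PAT case, assuming a made-up helper name (pat_set_entry() is illustrative only; the kernel itself programs IA32_PAT via the PAT() macro shown in the memtype.c hunk below):

#include <stdint.h>

#define X86_MEMTYPE_WC 1ull
#define X86_MEMTYPE_WB 6ull

/* Each IA32_PAT entry 'n' is a 3-bit field at bits n*8+2 : n*8. */
static inline uint64_t pat_set_entry(uint64_t pat, int n, uint64_t memtype)
{
        pat &= ~(0x7ull << (n * 8));
        return pat | (memtype << (n * 8));
}

/* e.g. pat_set_entry(pat, 1, X86_MEMTYPE_WC) programs PAT1 as Write Combining. */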
arch/x86/include/asm/vmx.h

@@ -508,9 +508,10 @@ enum vmcs_field {
 #define VMX_EPTP_PWL_4 0x18ull
 #define VMX_EPTP_PWL_5 0x20ull
 #define VMX_EPTP_AD_ENABLE_BIT (1ull << 6)
+/* The EPTP memtype is encoded in bits 2:0, i.e. doesn't need to be shifted. */
 #define VMX_EPTP_MT_MASK 0x7ull
-#define VMX_EPTP_MT_WB 0x6ull
-#define VMX_EPTP_MT_UC 0x0ull
+#define VMX_EPTP_MT_WB X86_MEMTYPE_WB
+#define VMX_EPTP_MT_UC X86_MEMTYPE_UC
 #define VMX_EPT_READABLE_MASK 0x1ull
 #define VMX_EPT_WRITABLE_MASK 0x2ull
 #define VMX_EPT_EXECUTABLE_MASK 0x4ull

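Because the EPTP memtype field sits at bits 2:0, the X86_MEMTYPE_* value is OR'd straight in. A rough sketch of the idea, using a hypothetical helper (KVM's real construct_eptp() takes more parameters and also handles 5-level paging and the A/D enable bit):

/* Hypothetical: build a 4-level, write-back EPTP for a page-aligned root. */
static inline u64 make_eptp(u64 root_hpa)
{
        /* The memtype lands in bits 2:0, so no shift is needed. */
        return root_hpa | VMX_EPTP_PWL_4 | VMX_EPTP_MT_WB;
}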
arch/x86/kernel/cpu/mtrr/mtrr.c

@@ -55,6 +55,12 @@
 #include "mtrr.h"
+static_assert(X86_MEMTYPE_UC == MTRR_TYPE_UNCACHABLE);
+static_assert(X86_MEMTYPE_WC == MTRR_TYPE_WRCOMB);
+static_assert(X86_MEMTYPE_WT == MTRR_TYPE_WRTHROUGH);
+static_assert(X86_MEMTYPE_WP == MTRR_TYPE_WRPROT);
+static_assert(X86_MEMTYPE_WB == MTRR_TYPE_WRBACK);
 /* arch_phys_wc_add returns an MTRR register index plus this offset. */
 #define MTRR_TO_PHYS_WC_OFFSET 1000

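For reference, the uapi counterparts these assertions pin down; the values follow directly from the asserts plus the X86_MEMTYPE_* definitions above, and match the SDM's MTRR type encodings (quoted from memory, presumably arch/x86/include/uapi/asm/mtrr.h):

/* uapi MTRR type encodings; 2 and 3 are reserved. */
#define MTRR_TYPE_UNCACHABLE 0
#define MTRR_TYPE_WRCOMB     1
#define MTRR_TYPE_WRTHROUGH  4
#define MTRR_TYPE_WRPROT     5
#define MTRR_TYPE_WRBACK     6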
arch/x86/kvm/vmx/nested.c

@@ -7070,7 +7070,7 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
                VMCS12_REVISION |
                VMX_BASIC_TRUE_CTLS |
                ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
-               (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+               (X86_MEMTYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
        if (cpu_has_vmx_basic_inout())
                msrs->basic |= VMX_BASIC_INOUT;

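Going the other way, the memtype advertised in msrs->basic can be pulled back out with the shift/mask already defined in msr-index.h. A hypothetical helper, not necessarily how KVM spells it:

static inline u64 vmx_basic_mem_type(u64 vmx_basic)
{
        return (vmx_basic & VMX_BASIC_MEM_TYPE_MASK) >> VMX_BASIC_MEM_TYPE_SHIFT;
}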
arch/x86/kvm/vmx/vmx.c

@@ -2747,7 +2747,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
 #endif
        /* Require Write-Back (WB) memory type for VMCS accesses. */
-       if (((vmx_msr_high >> 18) & 15) != 6)
+       if (((vmx_msr_high >> 18) & 15) != X86_MEMTYPE_WB)
                return -EIO;
        rdmsrl(MSR_IA32_VMX_MISC, misc_msr);

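The open-coded shift works because vmx_msr_high holds bits 63:32 of MSR_IA32_VMX_BASIC: bit 50 of the MSR is bit 18 of the high word, and the 4-bit field (bits 53:50) is masked with 15. An equivalent, slightly more self-documenting spelling (a sketch only, not something the patch changes):

/* Bits 53:50 of MSR_IA32_VMX_BASIC, as seen from the high 32-bit half. */
static inline u32 vmcs_mem_type(u32 vmx_msr_high)
{
        return (vmx_msr_high >> (VMX_BASIC_MEM_TYPE_SHIFT - 32)) & 0xf;
}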
arch/x86/mm/pat/memtype.c

@@ -176,15 +176,6 @@ static inline void set_page_memtype(struct page *pg,
 }
 #endif
-enum {
-       PAT_UC = 0, /* uncached */
-       PAT_WC = 1, /* Write combining */
-       PAT_WT = 4, /* Write Through */
-       PAT_WP = 5, /* Write Protected */
-       PAT_WB = 6, /* Write Back (default) */
-       PAT_UC_MINUS = 7, /* UC, but can be overridden by MTRR */
-};
 #define CM(c) (_PAGE_CACHE_MODE_ ## c)
 static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
@@ -194,13 +185,13 @@ static enum page_cache_mode __init pat_get_cache_mode(unsigned int pat_val,
        char *cache_mode;
        switch (pat_val) {
-       case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
-       case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
-       case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
-       case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
-       case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
-       case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
-       default: cache = CM(WB); cache_mode = "WB "; break;
+       case X86_MEMTYPE_UC: cache = CM(UC); cache_mode = "UC "; break;
+       case X86_MEMTYPE_WC: cache = CM(WC); cache_mode = "WC "; break;
+       case X86_MEMTYPE_WT: cache = CM(WT); cache_mode = "WT "; break;
+       case X86_MEMTYPE_WP: cache = CM(WP); cache_mode = "WP "; break;
+       case X86_MEMTYPE_WB: cache = CM(WB); cache_mode = "WB "; break;
+       case X86_MEMTYPE_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
+       default: cache = CM(WB); cache_mode = "WB "; break;
        }
        memcpy(msg, cache_mode, 4);
@@ -257,11 +248,11 @@ void pat_cpu_init(void)
 void __init pat_bp_init(void)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
-#define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \
-       (((u64)PAT_ ## p0) | ((u64)PAT_ ## p1 << 8) | \
-        ((u64)PAT_ ## p2 << 16) | ((u64)PAT_ ## p3 << 24) | \
-        ((u64)PAT_ ## p4 << 32) | ((u64)PAT_ ## p5 << 40) | \
-        ((u64)PAT_ ## p6 << 48) | ((u64)PAT_ ## p7 << 56))
+#define PAT(p0, p1, p2, p3, p4, p5, p6, p7) \
+       ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
+        (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
+        (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
+        (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
        if (!IS_ENABLED(CONFIG_X86_PAT))
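To see what the rewritten macro produces, here is a worked expansion using the architectural power-on layout of IA32_PAT purely as an illustration (not necessarily the layout pat_bp_init() ends up programming):

/*
 * PAT(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC)
 *   = 6 | (4ull << 8) | (7ull << 16) | (0ull << 24) |
 *     (6ull << 32) | (4ull << 40) | (7ull << 48) | (0ull << 56)
 *   = 0x0007040600070406, the documented reset value of IA32_PAT.
 */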