From: Liu Jingqi <jingqi.liu@xxxxxxxxx>

MOVDIR64B moves 64 bytes as a direct store with 64-byte write
atomicity. The direct store is implemented with write combining (WC),
writing the data directly into memory without caching it.

The bit definition:
CPUID.(EAX=7,ECX=0):ECX[bit 28] MOVDIR64B

The release document is available at the link below:
https://software.intel.com/sites/default/files/managed/c5/15/\
architecture-instruction-set-extensions-programming-reference.pdf

Cc: Xu Tao <tao3.xu@xxxxxxxxx>
Signed-off-by: Liu Jingqi <jingqi.liu@xxxxxxxxx>
Message-Id: <1541488407-17045-3-git-send-email-jingqi.liu@xxxxxxxxx>
Signed-off-by: Eduardo Habkost <ehabkost@xxxxxxxxxx>
---
 target/i386/cpu.h | 1 +
 target/i386/cpu.c | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b4f03ffd74..ef41a033c5 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -688,6 +688,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_ECX_RDPID     (1U << 22)
 #define CPUID_7_0_ECX_CLDEMOTE  (1U << 25) /* CLDEMOTE Instruction */
 #define CPUID_7_0_ECX_MOVDIRI   (1U << 27) /* MOVDIRI Instruction */
+#define CPUID_7_0_ECX_MOVDIR64B (1U << 28) /* MOVDIR64B Instruction */
 
 #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */
 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 227baea337..86a934d450 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1024,7 +1024,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
             "la57", NULL, NULL, NULL,
             NULL, NULL, "rdpid", NULL,
             NULL, "cldemote", NULL, "movdiri",
-            NULL, NULL, NULL, NULL,
+            "movdir64b", NULL, NULL, NULL,
         },
         .cpuid = {
             .eax = 7,
-- 
2.18.0.rc1.1.g3f1ff2140
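
[Editor's note, not part of the patch] Guest or user-space software
would detect this feature through the very bit the patch exposes,
CPUID.(EAX=7,ECX=0):ECX[bit 28]. A minimal, hypothetical C sketch of
such a check follows, assuming GCC/Clang's <cpuid.h> helper; the macro
name ECX_MOVDIR64B is illustrative and only mirrors the
CPUID_7_0_ECX_MOVDIR64B define added above.

/* Hypothetical feature check, not part of this patch: reads
 * CPUID.(EAX=7,ECX=0):ECX and tests bit 28 (MOVDIR64B). */
#include <stdio.h>
#include <cpuid.h>                   /* GCC/Clang __get_cpuid_count() */

#define ECX_MOVDIR64B (1U << 28)     /* mirrors CPUID_7_0_ECX_MOVDIR64B */

int main(void)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* Leaf 7, subleaf 0: structured extended feature flags. */
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 7 not supported");
        return 1;
    }

    puts((ecx & ECX_MOVDIR64B) ? "movdir64b: supported"
                               : "movdir64b: not supported");
    return 0;
}

Once the bit is confirmed, compilers that provide the _movdir64b()
intrinsic (immintrin.h, built with -mmovdir64b) can be used to emit the
instruction itself; that part is compiler-dependent and only mentioned
here as background.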