Re: [PATCH v7 14/16] i386: Use CPUCacheInfo.share_level to encode CPUID[4]

On 1/8/2024 4:27 PM, Zhao Liu wrote:
From: Zhao Liu <zhao1.liu@xxxxxxxxx>

CPUID[4].EAX[bits 25:14] is used to represent the cache topology for
Intel CPUs.

Now that the cache models carry topology information, we can use
CPUCacheInfo.share_level to decide which topology level should be
encoded into CPUID[4].EAX[bits 25:14].
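
(Just to make the encoding concrete, with hypothetical numbers not taken
from this patch: for 2 threads per core and 4 cores per die,
apicid_core_offset() is 1 and apicid_die_offset() is 3, so an L2 cache
with share_level == CPU_TOPO_LEVEL_CORE gets

    EAX[25:14] = (1 << apicid_core_offset(topo_info)) - 1 = 1

i.e. 2 addressable logical processor IDs share it, while an L3 cache with
share_level == CPU_TOPO_LEVEL_DIE gets (1 << 3) - 1 = 7, i.e. 8
addressable IDs.)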

And since the maximum processor ID (the original "num_apic_ids") is
derived from the CPU topology levels, which are already verified when
parsing -smp, there is no need to check this value with
"assert(num_apic_ids > 0)" again, so remove this assert.

Additionally, wrap the encoding of CPUID[4].EAX[bits 31:26] into a
helper to make the code cleaner.

Signed-off-by: Zhao Liu <zhao1.liu@xxxxxxxxx>
Tested-by: Babu Moger <babu.moger@xxxxxxx>
Tested-by: Yongwei Ma <yongwei.ma@xxxxxxxxx>
Acked-by: Michael S. Tsirkin <mst@xxxxxxxxxx>
---
Changes since v1:
  * Use "enum CPUTopoLevel share_level" as the parameter in
    max_processor_ids_for_cache().
  * Make cache_into_passthrough case also use
    max_processor_ids_for_cache() and max_core_ids_in_package() to
    encode CPUID[4]. (Yanan)
  * Rename the title of this patch (the original is "i386: Use
    CPUCacheInfo.share_level to encode CPUID[4].EAX[bits 25:14]").
---
  target/i386/cpu.c | 70 +++++++++++++++++++++++++++++------------------
  1 file changed, 43 insertions(+), 27 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 81e07474acef..b23e8190dc68 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -235,22 +235,53 @@ static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
                         ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                         0 /* Invalid value */)
+static uint32_t max_processor_ids_for_cache(X86CPUTopoInfo *topo_info,
+                                            enum CPUTopoLevel share_level)

I'd prefer a name like max_lp_ids_share_the_cache().
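
That is, roughly (same parameters, only the helper renamed):

    static uint32_t max_lp_ids_share_the_cache(X86CPUTopoInfo *topo_info,
                                               enum CPUTopoLevel share_level)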

+{
+    uint32_t num_ids = 0;
+
+    switch (share_level) {
+    case CPU_TOPO_LEVEL_CORE:
+        num_ids = 1 << apicid_core_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_DIE:
+        num_ids = 1 << apicid_die_offset(topo_info);
+        break;
+    case CPU_TOPO_LEVEL_PACKAGE:
+        num_ids = 1 << apicid_pkg_offset(topo_info);
+        break;
+    default:
+        /*
+         * Currently there is no use case for SMT and MODULE, so use
+         * assert directly to facilitate debugging.
+         */
+        g_assert_not_reached();
+    }
+
+    return num_ids - 1;

I'd suggest just returning num_ids here and letting the caller do the -1.
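
E.g. the call site in encode_cache_cpuid4() would then look something
like this (untested sketch of the idea):

    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((max_core_ids_in_package(topo_info) - 1) << 26) |
           ((max_processor_ids_for_cache(topo_info, cache->share_level) - 1) << 14);

and the cache_info_passthrough path would apply the -1 the same way.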

+}
+
+static uint32_t max_core_ids_in_package(X86CPUTopoInfo *topo_info)
+{
+    uint32_t num_cores = 1 << (apicid_pkg_offset(topo_info) -
+                               apicid_core_offset(topo_info));
+    return num_cores - 1;

ditto.

+}
  /* Encode cache info for CPUID[4] */
  static void encode_cache_cpuid4(CPUCacheInfo *cache,
-                                int num_apic_ids, int num_cores,
+                                X86CPUTopoInfo *topo_info,
                                  uint32_t *eax, uint32_t *ebx,
                                  uint32_t *ecx, uint32_t *edx)
  {
      assert(cache->size == cache->line_size * cache->associativity *
                            cache->partitions * cache->sets);
-    assert(num_apic_ids > 0);
      *eax = CACHE_TYPE(cache->type) |
             CACHE_LEVEL(cache->level) |
             (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
-           ((num_cores - 1) << 26) |
-           ((num_apic_ids - 1) << 14);
+           (max_core_ids_in_package(topo_info) << 26) |
+           (max_processor_ids_for_cache(topo_info, cache->share_level) << 14);

By the way, we could also swap the order of those two lines. :)

      assert(cache->line_size > 0);
      assert(cache->partitions > 0);
@@ -6263,56 +6294,41 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                  int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
                if (cores_per_pkg > 1) {
-                    int addressable_cores_offset =
-                                                apicid_pkg_offset(&topo_info) -
-                                                apicid_core_offset(&topo_info);
-
                      *eax &= ~0xFC000000;
-                    *eax |= (1 << (addressable_cores_offset - 1)) << 26;
+                    *eax |= max_core_ids_in_package(&topo_info) << 26;
                  }
                  if (host_vcpus_per_cache > cpus_per_pkg) {
-                    int pkg_offset = apicid_pkg_offset(&topo_info);
-
                      *eax &= ~0x3FFC000;
-                    *eax |= (1 << (pkg_offset - 1)) << 14;
+                    *eax |=
+                        max_processor_ids_for_cache(&topo_info,
+                                                CPU_TOPO_LEVEL_PACKAGE) << 14;
                  }
              }
          } else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
              *eax = *ebx = *ecx = *edx = 0;
          } else {
              *eax = 0;
-            int addressable_cores_offset = apicid_pkg_offset(&topo_info) -
-                                           apicid_core_offset(&topo_info);
-            int core_offset, die_offset;
            switch (count) {
              case 0: /* L1 dcache info */
-                core_offset = apicid_core_offset(&topo_info);
                  encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                      eax, ebx, ecx, edx);
                  break;
              case 1: /* L1 icache info */
-                core_offset = apicid_core_offset(&topo_info);
                  encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                      eax, ebx, ecx, edx);
                  break;
              case 2: /* L2 cache info */
-                core_offset = apicid_core_offset(&topo_info);
                  encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
-                                    (1 << core_offset),
-                                    (1 << addressable_cores_offset),
+                                    &topo_info,
                                      eax, ebx, ecx, edx);
                  break;
              case 3: /* L3 cache info */
-                die_offset = apicid_die_offset(&topo_info);
                  if (cpu->enable_l3_cache) {
                      encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
-                                        (1 << die_offset),
-                                        (1 << addressable_cores_offset),
+                                        &topo_info,
                                          eax, ebx, ecx, edx);
                      break;
                  }




