Add helpers to read/write per-cpu data instead of open coding access
with gs: and magic numbers. Keeping track of what offsets are used for
what and by whom is a nightmare.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 lib/x86/desc.c | 23 ++++++------------
 lib/x86/smp.c  |  7 ++----
 lib/x86/smp.h  | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+), 21 deletions(-)

diff --git a/lib/x86/desc.c b/lib/x86/desc.c
index 25c5ac55..22ff59e9 100644
--- a/lib/x86/desc.c
+++ b/lib/x86/desc.c
@@ -1,6 +1,7 @@
 #include "libcflat.h"
 #include "desc.h"
 #include "processor.h"
+#include "smp.h"
 #include <setjmp.h>
 #include "apic-defs.h"
 
@@ -155,11 +156,10 @@ void unhandled_exception(struct ex_regs *regs, bool cpu)
 static void check_exception_table(struct ex_regs *regs)
 {
 	struct ex_record *ex;
-	unsigned ex_val;
 
-	ex_val = regs->vector | (regs->error_code << 16) |
-		(((regs->rflags >> 16) & 1) << 8);
-	asm("mov %0, %%gs:4" : : "r"(ex_val));
+	this_cpu_write_exception_vector(regs->vector);
+	this_cpu_write_exception_rflags_rf((regs->rflags >> 16) & 1);
+	this_cpu_write_exception_error_code(regs->error_code);
 
 	for (ex = &exception_table_start; ex != &exception_table_end; ++ex) {
 		if (ex->rip == regs->rip) {
@@ -296,10 +296,7 @@ void setup_idt(void)
 
 unsigned exception_vector(void)
 {
-	unsigned char vector;
-
-	asm volatile("movb %%gs:4, %0" : "=q"(vector));
-	return vector;
+	return this_cpu_read_exception_vector();
 }
 
 int write_cr4_checking(unsigned long val)
@@ -312,18 +309,12 @@ int write_cr4_checking(unsigned long val)
 
 unsigned exception_error_code(void)
 {
-	unsigned short error_code;
-
-	asm volatile("mov %%gs:6, %0" : "=r"(error_code));
-	return error_code;
+	return this_cpu_read_exception_error_code();
 }
 
 bool exception_rflags_rf(void)
 {
-	unsigned char rf_flag;
-
-	asm volatile("movb %%gs:5, %b0" : "=q"(rf_flag));
-	return rf_flag & 1;
+	return this_cpu_read_exception_rflags_rf() & 1;
 }
 
 static char intr_alt_stack[4096];
diff --git a/lib/x86/smp.c b/lib/x86/smp.c
index b24675fd..683b25d1 100644
--- a/lib/x86/smp.c
+++ b/lib/x86/smp.c
@@ -54,15 +54,12 @@ int cpu_count(void)
 
 int smp_id(void)
 {
-	unsigned id;
-
-	asm ("mov %%gs:0, %0" : "=r"(id));
-	return id;
+	return this_cpu_read_smp_id();
 }
 
 static void setup_smp_id(void *data)
 {
-	asm ("mov %0, %%gs:0" : : "r"(apic_id()) : "memory");
+	this_cpu_write_smp_id(apic_id());
 }
 
 static void __on_cpu(int cpu, void (*function)(void *data), void *data, int wait)
diff --git a/lib/x86/smp.h b/lib/x86/smp.h
index f74845e6..eb037a46 100644
--- a/lib/x86/smp.h
+++ b/lib/x86/smp.h
@@ -1,7 +1,72 @@
 #ifndef _X86_SMP_H_
 #define _X86_SMP_H_
+
+#include <stddef.h>
 #include <asm/spinlock.h>
 
+/* Offsets into the per-cpu page. */
+struct percpu_data {
+	uint32_t smp_id;
+	union {
+		struct {
+			uint8_t exception_vector;
+			uint8_t exception_rflags_rf;
+			uint16_t exception_error_code;
+		};
+		uint32_t exception_data;
+	};
+};
+
+#define typeof_percpu(name) typeof(((struct percpu_data *)0)->name)
+#define offsetof_percpu(name) offsetof(struct percpu_data, name)
+
+#define BUILD_PERCPU_OP(name) \
+static inline typeof_percpu(name) this_cpu_read_##name(void) \
+{ \
+	typeof_percpu(name) val; \
+	\
+	switch (sizeof(val)) { \
+	case 1: \
+		asm("movb %%gs:%c1, %0" : "=q" (val) : "i" (offsetof_percpu(name))); \
+		break; \
+	case 2: \
+		asm("movw %%gs:%c1, %0" : "=r" (val) : "i" (offsetof_percpu(name))); \
+		break; \
+	case 4: \
+		asm("movl %%gs:%c1, %0" : "=r" (val) : "i" (offsetof_percpu(name))); \
+		break; \
+	case 8: \
+		asm("movq %%gs:%c1, %0" : "=r" (val) : "i" (offsetof_percpu(name))); \
+		break; \
+	default: \
+		asm volatile("ud2"); \
+	} \
+	return val; \
+} \
+static inline void this_cpu_write_##name(typeof_percpu(name) val) \
+{ \
+	switch (sizeof(val)) { \
+	case 1: \
+		asm("movb %0, %%gs:%c1" :: "q" (val), "i" (offsetof_percpu(name))); \
+		break; \
+	case 2: \
+		asm("movw %0, %%gs:%c1" :: "r" (val), "i" (offsetof_percpu(name))); \
+		break; \
+	case 4: \
+		asm("movl %0, %%gs:%c1" :: "r" (val), "i" (offsetof_percpu(name))); \
+		break; \
+	case 8: \
+		asm("movq %0, %%gs:%c1" :: "r" (val), "i" (offsetof_percpu(name))); \
+		break; \
+	default: \
+		asm volatile("ud2"); \
+	} \
+}
+BUILD_PERCPU_OP(smp_id);
+BUILD_PERCPU_OP(exception_vector);
+BUILD_PERCPU_OP(exception_rflags_rf);
+BUILD_PERCPU_OP(exception_error_code);
+
 void smp_init(void);
 
 int cpu_count(void);
-- 
2.35.0.rc0.227.g00780c9af4-goog
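
Illustration only, not part of the patch: a rough sketch of how a test might
consume the generated accessors, assuming the usual setup where each CPU's GS
base already points at its per-cpu page (the same assumption the old
open-coded gs: accesses relied on). The dump_exception_state() helper below
is hypothetical:

	#include "libcflat.h"
	#include "smp.h"

	/* Hypothetical helper: print this CPU's recorded exception state. */
	static void dump_exception_state(void)
	{
		/* Each read is a mov from %gs:<offsetof(struct percpu_data, field)>. */
		printf("CPU %d: vector=%d, error code=0x%x, RF=%d\n",
		       smp_id(), this_cpu_read_exception_vector(),
		       this_cpu_read_exception_error_code(),
		       this_cpu_read_exception_rflags_rf() & 1);
	}

Because the offsets come from offsetof(struct percpu_data, ...), the struct
definition in smp.h is the single place that describes the per-cpu layout.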