On Wed, Feb 07, 2024, Xin Li wrote:
> @@ -32,48 +32,48 @@ BUILD_BUG_ON(1)
>   */
>
>  /* 16-bits */
> -SHADOW_FIELD_RW(GUEST_INTR_STATUS, guest_intr_status)
> -SHADOW_FIELD_RW(GUEST_PML_INDEX, guest_pml_index)
> -SHADOW_FIELD_RW(HOST_FS_SELECTOR, host_fs_selector)
> -SHADOW_FIELD_RW(HOST_GS_SELECTOR, host_gs_selector)
> +SHADOW_FIELD_RW(GUEST_INTR_STATUS, guest_intr_status, cpu_has_vmx_apicv())
> +SHADOW_FIELD_RW(GUEST_PML_INDEX, guest_pml_index, cpu_has_vmx_pml())
> +SHADOW_FIELD_RW(HOST_FS_SELECTOR, host_fs_selector, true)
> +SHADOW_FIELD_RW(HOST_GS_SELECTOR, host_gs_selector, true)
>
>  /* 32-bits */
> -SHADOW_FIELD_RO(VM_EXIT_REASON, vm_exit_reason)
> -SHADOW_FIELD_RO(VM_EXIT_INTR_INFO, vm_exit_intr_info)
> -SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len)
> -SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field)
> -SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code)
> -SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code)
> -SHADOW_FIELD_RO(GUEST_CS_AR_BYTES, guest_cs_ar_bytes)
> -SHADOW_FIELD_RO(GUEST_SS_AR_BYTES, guest_ss_ar_bytes)
> -SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control)
> -SHADOW_FIELD_RW(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control)
> -SHADOW_FIELD_RW(EXCEPTION_BITMAP, exception_bitmap)
> -SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code)
> -SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field)
> -SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len)
> -SHADOW_FIELD_RW(TPR_THRESHOLD, tpr_threshold)
> -SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info)
> -SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value)
> +SHADOW_FIELD_RO(VM_EXIT_REASON, vm_exit_reason, true)
> +SHADOW_FIELD_RO(VM_EXIT_INTR_INFO, vm_exit_intr_info, true)
> +SHADOW_FIELD_RO(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len, true)
> +SHADOW_FIELD_RO(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code, true)
> +SHADOW_FIELD_RO(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field, true)
> +SHADOW_FIELD_RO(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code, true)
> +SHADOW_FIELD_RO(GUEST_CS_AR_BYTES, guest_cs_ar_bytes, true)
> +SHADOW_FIELD_RO(GUEST_SS_AR_BYTES, guest_ss_ar_bytes, true)
> +SHADOW_FIELD_RW(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control, true)
> +SHADOW_FIELD_RW(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control, true)
> +SHADOW_FIELD_RW(EXCEPTION_BITMAP, exception_bitmap, true)
> +SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code, true)
> +SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field, true)
> +SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len, true)
> +SHADOW_FIELD_RW(TPR_THRESHOLD, tpr_threshold, true)
> +SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info, true)
> +SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value, cpu_has_vmx_preemption_timer())
>
>  /* Natural width */
> -SHADOW_FIELD_RO(EXIT_QUALIFICATION, exit_qualification)
> -SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS, guest_linear_address)
> -SHADOW_FIELD_RW(GUEST_RIP, guest_rip)
> -SHADOW_FIELD_RW(GUEST_RSP, guest_rsp)
> -SHADOW_FIELD_RW(GUEST_CR0, guest_cr0)
> -SHADOW_FIELD_RW(GUEST_CR3, guest_cr3)
> -SHADOW_FIELD_RW(GUEST_CR4, guest_cr4)
> -SHADOW_FIELD_RW(GUEST_RFLAGS, guest_rflags)
> -SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK, cr0_guest_host_mask)
> -SHADOW_FIELD_RW(CR0_READ_SHADOW, cr0_read_shadow)
> -SHADOW_FIELD_RW(CR4_READ_SHADOW, cr4_read_shadow)
> -SHADOW_FIELD_RW(HOST_FS_BASE, host_fs_base)
> -SHADOW_FIELD_RW(HOST_GS_BASE, host_gs_base)
> +SHADOW_FIELD_RO(EXIT_QUALIFICATION, exit_qualification, true)
> +SHADOW_FIELD_RO(GUEST_LINEAR_ADDRESS, guest_linear_address, true)
> +SHADOW_FIELD_RW(GUEST_RIP, guest_rip, true)
> +SHADOW_FIELD_RW(GUEST_RSP, guest_rsp, true)
> +SHADOW_FIELD_RW(GUEST_CR0, guest_cr0, true)
> +SHADOW_FIELD_RW(GUEST_CR3, guest_cr3, true)
> +SHADOW_FIELD_RW(GUEST_CR4, guest_cr4, true)
> +SHADOW_FIELD_RW(GUEST_RFLAGS, guest_rflags, true)
> +SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK, cr0_guest_host_mask, true)
> +SHADOW_FIELD_RW(CR0_READ_SHADOW, cr0_read_shadow, true)
> +SHADOW_FIELD_RW(CR4_READ_SHADOW, cr4_read_shadow, true)
> +SHADOW_FIELD_RW(HOST_FS_BASE, host_fs_base, true)
> +SHADOW_FIELD_RW(HOST_GS_BASE, host_gs_base, true)
>
>  /* 64-bit */
> -SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS, guest_physical_address)
> -SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH, guest_physical_address)
> +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS, guest_physical_address, true)
> +SHADOW_FIELD_RO(GUEST_PHYSICAL_ADDRESS_HIGH, guest_physical_address, true)

This is not a net positive for readability or maintainability.  I don't hate the
idea, it just needs MOAR MACROs :-)

E.g. add a layer for the common case where the field unconditionally exists.

#ifndef __SHADOW_FIELD_RO
#define __SHADOW_FIELD_RO(x, y, c)
#endif

#ifndef __SHADOW_FIELD_RW
#define __SHADOW_FIELD_RW(x, y, c)
#endif

#define SHADOW_FIELD_RO(x, y) __SHADOW_FIELD_RO(x, y, true)
#define SHADOW_FIELD_RW(x, y) __SHADOW_FIELD_RW(x, y, true)
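With that layering, consumers of the header define only the three-argument
__SHADOW_FIELD_* variants, and unconditional field entries arrive with a
condition of "true".  Rough, untested sketch of both consumer styles,
assuming the header keeps #undef'ing the macros at the bottom as it does
today; the table-building pattern is the existing one in nested.c, while
init_shadowable_fields() and the pr_info() are made up purely to show the
condition being consumed, they are not from the patch:

/* Table builder, same behavior as today: the condition is simply ignored. */
#define __SHADOW_FIELD_RW(x, y, c) { x, offsetof(struct vmcs12, y) },
static struct shadow_vmcs_field shadow_read_write_fields[] = {
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

/*
 * Illustrative only: a consumer that cares about the existence check gets
 * the condition for every field, e.g. to skip shadowing fields the CPU
 * doesn't support without a hardcoded switch on the field encoding.
 */
#define __SHADOW_FIELD_RW(x, y, c)				\
	if (!(c))						\
		pr_info("not shadowing " #x "\n");
static void init_shadowable_fields(void)
{
#include "vmcs_shadow_fields.h"
}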