GVT-g supports both the Xen and KVM hypervisors and requires a set of
hypervisor services to work. The MPT framework is an abstraction layer
that provides a unified hypervisor API to the GVT-g core logic.

Signed-off-by: Zhi Wang <zhi.a.wang@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gvt/gvt.c       |   6 ++
 drivers/gpu/drm/i915/gvt/gvt.h       |  11 +-
 drivers/gpu/drm/i915/gvt/hypercall.h |  26 +++++
 drivers/gpu/drm/i915/gvt/mmio.c      | 194 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/gvt/mmio.h      |   4 +
 drivers/gpu/drm/i915/gvt/mpt.h       | 103 +++++++++++++++----
 drivers/gpu/drm/i915/gvt/perf.h      |   4 +
 7 files changed, 326 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 13fecdf..a71873c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -31,6 +31,11 @@ struct gvt_host gvt_host;
 extern struct gvt_kernel_dm xengt_kdm;
 extern struct gvt_kernel_dm kvmgt_kdm;
 
+static struct gvt_io_emulation_ops default_io_emulation_ops = {
+	.emulate_mmio_read = gvt_emulate_mmio_read,
+	.emulate_mmio_write = gvt_emulate_mmio_write,
+};
+
 static const char *supported_hypervisors[] = {
 	[GVT_HYPERVISOR_TYPE_XEN] = "Xen Hypervisor",
 	[GVT_HYPERVISOR_TYPE_KVM] = "KVM",
@@ -72,6 +77,7 @@ static bool gvt_init_host(void)
 	gvt_info("Running with hypervisor %s in host mode",
 			supported_hypervisors[host->hypervisor_type]);
 
+	host->emulate_ops = &default_io_emulation_ops;
 	idr_init(&host->device_idr);
 	mutex_init(&host->device_idr_lock);
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 542f3e6..eb5fd47 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -31,7 +31,6 @@
 #include "params.h"
 #include "reg.h"
 #include "hypercall.h"
-#include "mpt.h"
 #include "fb_decoder.h"
 #include "mmio.h"
 #include "interrupt.h"
@@ -52,12 +51,20 @@ enum {
 	GVT_HYPERVISOR_TYPE_KVM,
 };
 
+struct gvt_io_emulation_ops {
+	bool (*emulate_mmio_read)(struct vgt_device *, uint64_t, void *, int);
+	bool (*emulate_mmio_write)(struct vgt_device *, uint64_t, void *, int);
+	bool (*emulate_cfg_read)(struct vgt_device *, unsigned int, void *, int);
+	bool (*emulate_cfg_write)(struct vgt_device *, unsigned int, void *, int);
+};
+
 struct gvt_host {
 	bool initialized;
 	int hypervisor_type;
 	struct mutex device_idr_lock;
 	struct idr device_idr;
 	struct gvt_kernel_dm *kdm;
+	struct gvt_io_emulation_ops *emulate_ops;
 };
 
 extern struct gvt_host gvt_host;
@@ -579,4 +586,6 @@ static inline u32 h2g_gtt_index(struct vgt_device *vgt, uint32_t h_index)
 	return (u32)(h2g_gm(vgt, h_addr) >> GTT_PAGE_SHIFT);
 }
 
+#include "mpt.h"
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 0a41874..d30f5a7 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -24,7 +24,33 @@
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_
 
+struct vgt_device;
+struct guest_page;
+
+enum map_type {
+	GVT_MAP_APERTURE,
+	GVT_MAP_OPREGION,
+};
+
 struct gvt_kernel_dm {
+	const char *name;
+	unsigned long (*g2m_pfn)(int vm_id, unsigned long g_pfn);
+	int (*pause_domain)(int vm_id);
+	int (*shutdown_domain)(int vm_id);
+	int (*map_mfn_to_gpfn)(int vm_id, unsigned long gpfn,
+		unsigned long mfn, int nr, int map, enum map_type type);
+	int (*set_trap_area)(struct vgt_device *vgt, uint64_t start, uint64_t end, bool map);
+	bool (*set_wp_pages)(struct vgt_device *vgt, struct guest_page *p);
+	bool (*unset_wp_pages)(struct vgt_device *vgt, struct guest_page *p);
+	int (*detect_host)(void);
+	int (*from_virt_to_mfn)(void *addr);
+	void *(*from_mfn_to_virt)(int mfn);
+	int (*inject_msi)(int vm_id, u32 addr, u16 data);
+	int (*hvm_init)(struct vgt_device *vgt);
+	void (*hvm_exit)(struct vgt_device *vgt);
+	void *(*gpa_to_va)(struct vgt_device *vgt, unsigned long gap);
+	bool (*read_va)(struct vgt_device *vgt, void *va, void *val, int len, int atomic);
+	bool (*write_va)(struct vgt_device *vgt, void *va, void *val, int len, int atomic);
 };
 
 #endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 28e1393..3297d82 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -320,3 +320,197 @@ void gvt_init_shadow_mmio_register(struct vgt_device *vgt)
 	struct gvt_virtual_device_state *state = &vgt->state;
 	memcpy (state->mmio.sreg, vgt->pdev->initial_mmio_state, vgt->pdev->mmio_size);
 }
+
+unsigned int pa_to_mmio_offset(struct vgt_device *vgt,
+	uint64_t pa)
+{
+#define PCI_BAR_ADDR_MASK (~0xFUL)	/* 4 LSB bits are not address */
+	return pa - ((*(u64*)(vgt->state.cfg.space + GVT_REG_CFG_SPACE_BAR0))
+			& PCI_BAR_ADDR_MASK);
+}
+
+static inline bool valid_mmio_alignment(struct gvt_mmio_entry *e,
+	unsigned int offset, int bytes)
+{
+	if ((bytes >= e->align_bytes) && !(offset & (bytes - 1)))
+		return true;
+	gvt_err("invalid MMIO offset %08x len %d", offset, bytes);
+	return false;
+}
+
+bool gvt_default_mmio_read(struct vgt_device *vgt, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	memcpy(p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+	return true;
+}
+
+bool gvt_default_mmio_write(struct vgt_device *vgt, unsigned int offset,
+	void *p_data, unsigned int bytes)
+{
+	memcpy((char *)vgt->state.mmio.vreg + offset, p_data, bytes);
+	return true;
+}
+
+bool gvt_emulate_mmio_read(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes)
+{
+	struct pgt_device *pdev = vgt->pdev;
+	struct gvt_statistics *stat = &vgt->stat;
+	struct gvt_mmio_entry *mmio_entry;
+	unsigned int offset;
+	cycles_t t0, t1;
+	bool r;
+
+	t0 = get_cycles();
+
+	mutex_lock(&pdev->lock);
+
+	if (atomic_read(&vgt->gtt.n_write_protected_guest_page)) {
+		guest_page_t *gp;
+		gp = gvt_find_guest_page(vgt, pa >> PAGE_SHIFT);
+		if (gp) {
+			memcpy(p_data, gp->vaddr + (pa & ~PAGE_MASK), bytes);
+			mutex_unlock(&pdev->lock);
+			return true;
+		}
+	}
+
+	offset = pa_to_mmio_offset(vgt, pa);
+
+	if (bytes > 8 || (offset & (bytes - 1)))
+		goto err;
+
+	if (reg_is_gtt(pdev, offset)) {
+		r = gtt_emulate_read(vgt, offset, p_data, bytes);
+		mutex_unlock(&pdev->lock);
+		return r;
+	}
+
+	if (!reg_is_mmio(pdev, offset + bytes))
+		goto err;
+
+	mmio_entry = find_mmio_entry(pdev, offset);
+	if (mmio_entry && mmio_entry->read) {
+		if (!valid_mmio_alignment(mmio_entry, offset, bytes))
+			goto err;
+		if (!mmio_entry->read(vgt, offset, p_data, bytes))
+			goto err;
+	} else
+		if (!gvt_default_mmio_read(vgt, offset, p_data, bytes))
+			goto err;
+
+	if (!reg_is_tracked(pdev, offset) && vgt->warn_untrack) {
+		gvt_warn("[ vgt%d ] untracked MMIO read, offset %x len %d val 0x%x",
+				vgt->vm_id, offset, bytes, *(u32 *)p_data);
+
+		if (offset == 0x206c) {
+			printk("------------------------------------------\n");
+			printk("VM(%d) likely triggers a gfx reset\n", vgt->vm_id);
+			printk("Disable untracked MMIO warning for VM(%d)\n", vgt->vm_id);
+			printk("------------------------------------------\n");
+			vgt->warn_untrack = 0;
+		}
+	}
+
+	reg_set_accessed(pdev, offset);
+	mutex_unlock(&pdev->lock);
+
+	t1 = get_cycles();
+	stat->mmio_rcnt++;
+	stat->mmio_rcycles += t1 - t0;
+	return true;
+err:
+	gvt_err("[ vgt%d ] fail to emulate MMIO read, offset %08x len %d",
+			vgt->id, offset, bytes);
+	mutex_unlock(&pdev->lock);
+	return false;
+}
+
+bool gvt_emulate_mmio_write(struct vgt_device *vgt, uint64_t pa,
+	void *p_data, int bytes)
+{
+	struct pgt_device *pdev = vgt->pdev;
+	struct gvt_mmio_entry *mmio_entry;
+	struct gvt_statistics *stat = &vgt->stat;
+	unsigned int offset;
+	u32 old_vreg = 0, old_sreg = 0;
+	cycles_t t0, t1;
+	bool r;
+
+	t0 = get_cycles();
+
+	mutex_lock(&pdev->lock);
+
+	if (atomic_read(&vgt->gtt.n_write_protected_guest_page)) {
+		guest_page_t *guest_page;
+		guest_page = gvt_find_guest_page(vgt, pa >> PAGE_SHIFT);
+		if (guest_page) {
+			r = guest_page->handler(guest_page, pa, p_data, bytes);
+			t1 = get_cycles();
+			stat->wp_cycles += t1 - t0;
+			stat->wp_cnt++;
+			mutex_unlock(&pdev->lock);
+			return r;
+		}
+	}
+
+	offset = pa_to_mmio_offset(vgt, pa);
+
+	/* FENCE registers / GTT entries(sometimes) are accessed in 8 bytes. */
+	if (bytes > 8 || (offset & (bytes - 1)))
+		goto err;
+
+	if (reg_is_gtt(pdev, offset)) {
+		r = gtt_emulate_write(vgt, offset, p_data, bytes);
+		mutex_unlock(&pdev->lock);
+		return r;
+	}
+
+	if (!reg_is_mmio(pdev, offset + bytes))
+		goto err;
+
+	if (reg_mode_ctl(pdev, offset)) {
+		old_vreg = __vreg(vgt, offset);
+		old_sreg = __sreg(vgt, offset);
+	}
+
+	if (!reg_is_tracked(pdev, offset) && vgt->warn_untrack) {
+		gvt_warn("[ vgt%d ] untracked MMIO write, offset %x len %d val 0x%x",
+				vgt->vm_id, offset, bytes, *(u32 *)p_data);
+	}
+
+	mmio_entry = find_mmio_entry(pdev, offset);
+	if (mmio_entry && mmio_entry->write ) {
+		if (!valid_mmio_alignment(mmio_entry, offset, bytes))
+			goto err;
+		if (!mmio_entry->write(vgt, offset, p_data, bytes))
+			goto err;
+	} else
+		if (!gvt_default_mmio_write(vgt, offset, p_data, bytes))
+			goto err;
+
+	/* higher 16bits of mode ctl regs are mask bits for change */
+	if (reg_mode_ctl(pdev, offset)) {
+		u32 mask = __vreg(vgt, offset) >> 16;
+		/*
+		 * share the global mask among VMs, since having one VM touch a bit
+		 * not changed by another VM should be still saved/restored later
+		 */
+		reg_aux_mode_mask(pdev, offset) |= mask << 16;
+		__vreg(vgt, offset) = (old_vreg & ~mask) | (__vreg(vgt, offset) & mask);
+		__sreg(vgt, offset) = (old_sreg & ~mask) | (__sreg(vgt, offset) & mask);
+	}
+
+	reg_set_accessed(pdev, offset);
+	mutex_unlock(&pdev->lock);
+
+	t1 = get_cycles();
+	stat->mmio_wcycles += t1 - t0;
+	stat->mmio_wcnt++;
+	return true;
+err:
+	gvt_err("[ vgt%d ] fail to emulate MMIO write, offset %08x len %d",
+			vgt->id, offset, bytes);
+	mutex_unlock(&pdev->lock);
+	return false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index caca60f..4301655 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -84,4 +84,8 @@ struct gvt_reg_info {
 extern struct gvt_reg_info gvt_general_reg_info[];
 extern struct gvt_reg_info gvt_broadwell_reg_info[];
 extern int gvt_get_reg_num(int type);
+
+bool gvt_emulate_mmio_read(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes);
+bool gvt_emulate_mmio_write(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 99acf3d..f837dd1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -24,85 +24,146 @@
 #ifndef _GVT_MPT_H_
 #define _GVT_MPT_H_
 
-struct guest_page;
-struct vgt_device;
-
 static inline unsigned long hypervisor_g2m_pfn(struct vgt_device *vgt,
 		unsigned long g_pfn)
 {
-	return 0;
+	return gvt_host.kdm->g2m_pfn(vgt->vm_id, g_pfn);
 }
 
 static inline int hypervisor_pause_domain(struct vgt_device *vgt)
 {
-	return 0;
+	return gvt_host.kdm->pause_domain(vgt->vm_id);
 }
 
 static inline int hypervisor_shutdown_domain(struct vgt_device *vgt)
 {
+	return gvt_host.kdm->shutdown_domain(vgt->vm_id);
+}
+
+static inline int hypervisor_map_mfn_to_gpfn(struct vgt_device *vgt,
+	unsigned long gpfn, unsigned long mfn, int nr, int map, enum map_type type)
+{
+	if (gvt_host.kdm && gvt_host.kdm->map_mfn_to_gpfn)
+		return gvt_host.kdm->map_mfn_to_gpfn(vgt->vm_id, gpfn, mfn, nr, map, type);
+
 	return 0;
 }
 
 static inline int hypervisor_set_trap_area(struct vgt_device *vgt,
-	uint64_t start, uint64_t end, bool map)
+	u64 start, u64 end, bool map)
 {
-	return 0;
+	return gvt_host.kdm->set_trap_area(vgt, start, end, map);
 }
 
-static inline bool hypervisor_detect_host(void)
+static inline int hypervisor_set_wp_pages(struct vgt_device *vgt, guest_page_t *p)
 {
-	return false;
+	return gvt_host.kdm->set_wp_pages(vgt, p);
 }
 
-static inline int hypervisor_virt_to_mfn(void *addr)
+static inline int hypervisor_unset_wp_pages(struct vgt_device *vgt, guest_page_t *p)
 {
-	return 0;
+	return gvt_host.kdm->unset_wp_pages(vgt, p);
 }
 
-static inline void *hypervisor_mfn_to_virt(int mfn)
+static inline int hypervisor_detect_host(void)
 {
-	return NULL;
+	return gvt_host.kdm->detect_host();
 }
 
-static inline int hypervisor_set_wp_pages(struct vgt_device *vgt, struct guest_page *p)
+static inline int hypervisor_virt_to_mfn(void *addr)
 {
-	return 0;
+	return gvt_host.kdm->from_virt_to_mfn(addr);
 }
 
-static inline int hypervisor_unset_wp_pages(struct vgt_device *vgt, struct guest_page *p)
+static inline void *hypervisor_mfn_to_virt(int mfn)
 {
-	return 0;
+	return gvt_host.kdm->from_mfn_to_virt(mfn);
 }
 
 static inline void hypervisor_inject_msi(struct vgt_device *vgt)
 {
-	return;
+#define MSI_CAP_OFFSET 0x90	/* FIXME. need to get from cfg emulation */
+#define MSI_CAP_CONTROL (MSI_CAP_OFFSET + 2)
+#define MSI_CAP_ADDRESS (MSI_CAP_OFFSET + 4)
+#define MSI_CAP_DATA (MSI_CAP_OFFSET + 8)
+#define MSI_CAP_EN 0x1
+
+	char *cfg_space = &vgt->state.cfg.space[0];
+	u16 control = *(u16 *)(cfg_space + MSI_CAP_CONTROL);
+	u32 addr = *(u32 *)(cfg_space + MSI_CAP_ADDRESS);
+	u16 data = *(u16 *)(cfg_space + MSI_CAP_DATA);
+	int r;
+
+	/* Do not generate MSI if MSIEN is disable */
+	if (!(control & MSI_CAP_EN))
+		return;
+
+	/* FIXME: currently only handle one MSI format */
+	ASSERT_NUM(!(control & 0xfffe), control);
+
+	gvt_dbg(GVT_DBG_IRQ, "VM %d hvm injections. address (%x) data(%x)!",
+			vgt->vm_id, addr, data);
+	r = gvt_host.kdm->inject_msi(vgt->vm_id, addr, data);
+	if (r < 0)
+		gvt_err("VGT %d failed to inject vmsi", vgt->id);
 }
 
 static inline int hypervisor_hvm_init(struct vgt_device *vgt)
 {
+	if (gvt_host.kdm && gvt_host.kdm->hvm_init)
+		return gvt_host.kdm->hvm_init(vgt);
+
 	return 0;
 }
 
 static inline void hypervisor_hvm_exit(struct vgt_device *vgt)
 {
+	if (gvt_host.kdm && gvt_host.kdm->hvm_exit)
+		gvt_host.kdm->hvm_exit(vgt);
 }
 
 static inline void *hypervisor_gpa_to_va(struct vgt_device *vgt, unsigned long gpa)
 {
-	return NULL;
+	if (!vgt->vm_id)
+		return (char *)hypervisor_mfn_to_virt(gpa >> PAGE_SHIFT) + offset_in_page(gpa);
+
+	return gvt_host.kdm->gpa_to_va(vgt, gpa);
 }
 
 static inline bool hypervisor_read_va(struct vgt_device *vgt, void *va,
 		void *val, int len, int atomic)
 {
-	return false;
+	bool ret;
+
+	if (!vgt->vm_id) {
+		memcpy(val, va, len);
+		return true;
+	}
+
+	ret = gvt_host.kdm->read_va(vgt, va, val, len, atomic);
+	if (unlikely(!ret))
+		gvt_err("VM(%d): read va failed, va: 0x%p, atomic : %s\n", vgt->vm_id,
+			va, atomic ? "yes" : "no");
+
+	return ret;
 }
 
 static inline bool hypervisor_write_va(struct vgt_device *vgt, void *va,
 		void *val, int len, int atomic)
 {
-	return false;
+	bool ret;
+
+	if (!vgt->vm_id) {
+		memcpy(va, val, len);
+		return true;
+	}
+
+	ret = gvt_host.kdm->write_va(vgt, va, val, len, atomic);
+	if (unlikely(!ret))
+		gvt_err("VM(%d): write va failed, va: 0x%p, atomic : %s\n", vgt->vm_id,
+			va, atomic ? "yes" : "no");
+
+	return ret;
 }
 
 #endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/perf.h b/drivers/gpu/drm/i915/gvt/perf.h
index 146a1cb..21b0637 100644
--- a/drivers/gpu/drm/i915/gvt/perf.h
+++ b/drivers/gpu/drm/i915/gvt/perf.h
@@ -28,6 +28,10 @@ struct gvt_statistics {
 	u64 irq_num;
 	u64 events[GVT_EVENT_MAX];
 	u64 last_injection;
+	u64 mmio_rcnt;
+	u64 mmio_wcnt;
+	u64 mmio_wcycles;
+	u64 mmio_rcycles;
 	u64 gtt_mmio_rcnt;
 	u64 gtt_mmio_wcnt;
 	u64 gtt_mmio_wcycles;
-- 
1.9.1