On Mon, Sep 10, 2018 at 03:44:33PM -0600, Jim Fehlig wrote:
> On 08/05/2018 03:48 PM, Marek Marczykowski-Górecki wrote:
> > Since this is something between PV and HVM, it makes sense to put the
> > setting in the place where the domain type is specified.
> > To enable it, use <os><type machine="xenpvh">...</type></os>. It is
> > also included in capabilities.xml, for every supported HVM guest type -
> > there doesn't seem to be any other requirement (besides a new enough Xen).
> >
> > Signed-off-by: Marek Marczykowski-Górecki <marmarek@xxxxxxxxxxxxxxxxxxxxxx>
> > ---
> >  docs/formatcaps.html.in        |  4 +--
> >  docs/schemas/domaincommon.rng  |  1 +-
> >  src/libxl/libxl_capabilities.c | 23 ++++++++++++++++++--
> >  src/libxl/libxl_conf.c         | 41 ++++++++++++++++++++++++++++++-----
> >  src/libxl/libxl_driver.c       |  6 +++--
> >  5 files changed, 64 insertions(+), 11 deletions(-)
> >
> > diff --git a/docs/formatcaps.html.in b/docs/formatcaps.html.in
> > index 34a74a9..1f17aa9 100644
> > --- a/docs/formatcaps.html.in
> > +++ b/docs/formatcaps.html.in
> > @@ -104,8 +104,8 @@
> >        <dt><code>machine</code></dt><dd>Machine type, for use in
> >          <a href="formatdomain.html#attributeOSTypeMachine">machine</a>
> >          attribute of os/type element in domain XML. For example Xen
> > -        supports <code>xenfv</code> for HVM or <code>xenpv</code> for
> > -        PV.</dd>
> > +        supports <code>xenfv</code> for HVM, <code>xenpv</code> for
> > +        PV, or <code>xenpvh</code> for PVHv2.</dd>
> >        <dt><code>domain</code></dt><dd>Supported domain type, named by the
> >          <code>type</code> attribute.</dd>
> >      </dl>
> >
> > diff --git a/docs/schemas/domaincommon.rng b/docs/schemas/domaincommon.rng
> > index eded1ca..d32b0cb 100644
> > --- a/docs/schemas/domaincommon.rng
> > +++ b/docs/schemas/domaincommon.rng
> > @@ -341,6 +341,7 @@
> >            <choice>
> >              <value>xenpv</value>
> >              <value>xenfv</value>
> > +            <value>xenpvh</value>
> >            </choice>
> >          </attribute>
> >        </optional>
> >
> > diff --git a/src/libxl/libxl_capabilities.c b/src/libxl/libxl_capabilities.c
> > index 18596c7..e7b26f1 100644
> > --- a/src/libxl/libxl_capabilities.c
> > +++ b/src/libxl/libxl_capabilities.c
> > @@ -57,6 +57,7 @@ struct guest_arch {
> >      virArch arch;
> >      int bits;
> >      int hvm;
> > +    int pvh;
> >      int pae;
> >      int nonpae;
> >      int ia64_be;
> > @@ -491,13 +492,29 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
> >              guest_archs[i].nonpae = nonpae;
> >          if (ia64_be)
> >              guest_archs[i].ia64_be = ia64_be;
> > +
> > +        /* On Xen >= 4.9 add PVH for each HVM guest, and do it only once */
> 
> I'm having problems understanding this. Do you mean add a PVH for each
> supported HVM arch, but exclude PAE? E.g. standard xen_caps on x86_64
> contains
> 
> xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64
> 
> Given these caps, should a PVH be added that corresponds to the
> hvm-3.0-x86_32 cap and another for the hvm-3.0-x86_64 cap, but the
> hvm-3.0-x86_32p cap excluded?

Yes, exactly. Setting PAE (or not) is possible only for HVM, but not PVH.
It would be much better if Xen reported support for PVH explicitly...

> > +        if ((ver_info->xen_version_major > 4 ||
> > +             (ver_info->xen_version_major == 4 &&
> > +              ver_info->xen_version_minor >= 9)) &&
> > +            hvm && i == nr_guest_archs-1) {
> > +            i = nr_guest_archs;
> > +            /* Too many arch flavours - highly unlikely ! */
> > +            if (i >= ARRAY_CARDINALITY(guest_archs))
> > +                continue;
> > +            nr_guest_archs++;
> > +            guest_archs[i].arch = arch;
> > +            guest_archs[i].pvh = 1;
> > +        }
> 
> Without answers to the above questions, I can't really comment on this code.
> Regardless, since PVH is not advertised in xen_caps shouldn't it be added to
> guest_archs outside of the loop parsing xen_caps?

This works on the assumption that if you have HVM and a new enough Xen, then
you have PVH. Just having a new Xen isn't enough - for example, the hardware
may lack VT-x.

> >          }
> >      }
> >      regfree(&regex);
> >
> >      for (i = 0; i < nr_guest_archs; ++i) {
> >          virCapsGuestPtr guest;
> > -        char const *const xen_machines[] = {guest_archs[i].hvm ? "xenfv" : "xenpv"};
> > +        char const *const xen_machines[] = {
> > +            guest_archs[i].hvm ? "xenfv" :
> > +            (guest_archs[i].pvh ? "xenpvh" : "xenpv")};
> >          virCapsGuestMachinePtr *machines;
> >
> >          if ((machines = virCapabilitiesAllocMachines(xen_machines, 1)) == NULL)
> >
> > @@ -557,7 +574,9 @@ libxlCapsInitGuests(libxl_ctx *ctx, virCapsPtr caps)
> >                                                 1,
> >                                                 0) == NULL)
> >                  return -1;
> > +        }
> > +        if (guest_archs[i].hvm || guest_archs[i].pvh) {
> >              if (virCapabilitiesAddGuestFeature(guest,
> >                                                 "hap",
> >                                                 1,
> >
> > @@ -580,7 +599,7 @@ libxlMakeDomainOSCaps(const char *machine,
> >
> >      os->supported = true;
> >
> > -    if (STREQ(machine, "xenpv"))
> > +    if (STREQ(machine, "xenpv") || STREQ(machine, "xenpvh"))
> >          return 0;
> >
> >      capsLoader->supported = true;
> >
> > diff --git a/src/libxl/libxl_conf.c b/src/libxl/libxl_conf.c
> > index f3da0ed..2df40ec 100644
> > --- a/src/libxl/libxl_conf.c
> > +++ b/src/libxl/libxl_conf.c
> > @@ -133,8 +133,11 @@ libxlMakeDomCreateInfo(libxl_ctx *ctx,
> >
> >      libxl_domain_create_info_init(c_info);
> >
> > -    if (def->os.type == VIR_DOMAIN_OSTYPE_HVM) {
> > -        c_info->type = LIBXL_DOMAIN_TYPE_HVM;
> > +    if (def->os.type == VIR_DOMAIN_OSTYPE_HVM ||
> > +        (def->os.type == VIR_DOMAIN_OSTYPE_XEN &&
> > +         STREQ(def->os.machine, "xenpvh"))) {
> > +        c_info->type = def->os.type == VIR_DOMAIN_OSTYPE_HVM ?
> > +                       LIBXL_DOMAIN_TYPE_HVM : LIBXL_DOMAIN_TYPE_PVH;
> >          switch ((virTristateSwitch) def->features[VIR_DOMAIN_FEATURE_HAP]) {
> >          case VIR_TRISTATE_SWITCH_OFF:
> >              libxl_defbool_set(&c_info->hap, false);
> >
> > @@ -276,7 +279,8 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
> >      virDomainClockDef clock = def->clock;
> >      libxl_ctx *ctx = cfg->ctx;
> >      libxl_domain_build_info *b_info = &d_config->b_info;
> > -    int hvm = def->os.type == VIR_DOMAIN_OSTYPE_HVM;
> > +    bool hvm = def->os.type == VIR_DOMAIN_OSTYPE_HVM;
> > +    bool pvh = STREQ(def->os.machine, "xenpvh");
> >      size_t i;
> >      size_t nusbdevice = 0;
> >
> > @@ -284,6 +288,8 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
> >
> >      if (hvm)
> >          libxl_domain_build_info_init_type(b_info, LIBXL_DOMAIN_TYPE_HVM);
> > +    else if (pvh)
> > +        libxl_domain_build_info_init_type(b_info, LIBXL_DOMAIN_TYPE_PVH);
> >      else
> >          libxl_domain_build_info_init_type(b_info, LIBXL_DOMAIN_TYPE_PV);
> >
> > @@ -375,7 +381,7 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
> >          def->mem.cur_balloon = VIR_ROUND_UP(def->mem.cur_balloon, 1024);
> >      b_info->max_memkb = virDomainDefGetMemoryInitial(def);
> >      b_info->target_memkb = def->mem.cur_balloon;
> > -    if (hvm) {
> > +    if (hvm || pvh) {
> >          if (caps &&
> >              def->cpu && def->cpu->mode == (VIR_CPU_MODE_HOST_PASSTHROUGH)) {
> >              bool hasHwVirt = false;
> >
> > @@ -647,6 +653,31 @@ libxlMakeDomBuildInfo(virDomainDefPtr def,
> >              return -1;
> >          }
> > #endif
> > +    } else if (pvh) {
> > +#ifdef LIBXL_HAVE_BUILDINFO_KERNEL
> > +        if (VIR_STRDUP(b_info->cmdline, def->os.cmdline) < 0)
> > +            return -1;
> > +        if (VIR_STRDUP(b_info->kernel, def->os.kernel) < 0)
> > +            return -1;
> > +        if (VIR_STRDUP(b_info->ramdisk, def->os.initrd) < 0)
> > +            return -1;
> > +#else
> > +        /*
> > +         * Shouldn't happen as LIBXL_HAVE_BUILDINFO_KERNEL is there since Xen
> > +         * 4.5, but PVHv2 since 4.9.
> > +         */
> > +        virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> > +                       _("PVH guest type not supported"));
> > +#endif
> 
> I guess this is needed, else the build will fail on Xen < 4.5?

Yes, exactly.

> Maybe it is
> time to bump the minimum supported Xen version to 4.6 :-). I say that a bit
> jokingly, but I did propose it a few months back.

IMO a good idea, since Xen < 4.6 is not supported anymore.

> > +#ifdef LIBXL_HAVE_BUILDINFO_BOOTLOADER
> > +        if (VIR_STRDUP(b_info->bootloader, def->os.bootloader) < 0)
> > +            return -1;
> > +        if (def->os.bootloaderArgs) {
> > +            if (!(b_info->bootloader_args =
> > +                  virStringSplit(def->os.bootloaderArgs, " \t\n", 0)))
> > +                return -1;
> > +        }
> > +#endif
> >      } else {
> >          /*
> >           * For compatibility with the legacy xen toolstack, default to pygrub
> >
> > @@ -1230,7 +1261,7 @@ libxlMakeNic(virDomainDefPtr def,
> >              STRNEQ(l_nic->model, "netfront")) {
> >              virReportError(VIR_ERR_CONFIG_UNSUPPORTED, "%s",
> >                             _("only model 'netfront' is supported for "
> > -                             "Xen PV domains"));
> > +                             "Xen PV(H) domains"));
> >              return -1;
> >          }
> >          if (VIR_STRDUP(x_nic->model, l_nic->model) < 0)
> >
> > diff --git a/src/libxl/libxl_driver.c b/src/libxl/libxl_driver.c
> > index 5a5e792..052a0da 100644
> > --- a/src/libxl/libxl_driver.c
> > +++ b/src/libxl/libxl_driver.c
> > @@ -6271,9 +6271,11 @@ libxlConnectGetDomainCapabilities(virConnectPtr conn,
> >          emulatorbin = "/usr/bin/qemu-system-x86_64";
> >
> >      if (machine) {
> > -        if (STRNEQ(machine, "xenpv") && STRNEQ(machine, "xenfv")) {
> > +        if (STRNEQ(machine, "xenpv") &&
> > +            STRNEQ(machine, "xenpvh") &&
> > +            STRNEQ(machine, "xenfv")) {
> >              virReportError(VIR_ERR_INVALID_ARG, "%s",
> > -                           _("Xen only supports 'xenpv' and 'xenfv' machines"));
> > +                           _("Xen only supports 'xenpv', 'xenpvh' and 'xenfv' machines"));
> >              goto cleanup;
> >          }
> >      } else {
> >
> 

-- 
Best Regards,
Marek Marczykowski-Górecki
Invisible Things Lab
A: Because it messes up the order in which people normally read text.
Q: Why is top-posting such a bad thing?
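For illustration, a minimal direct-kernel-boot PVH domain definition using the
new machine type could look roughly like the sketch below. This is only a
sketch: the name, memory size, vcpu count, kernel/initrd/disk paths and bridge
name are made-up placeholders rather than anything from the patch, and it
assumes the paravirtualized os type string "xen" (VIR_DOMAIN_OSTYPE_XEN, which
the patch checks together with machine "xenpvh").

  <domain type='xen'>
    <name>pvh-example</name>
    <memory unit='KiB'>524288</memory>
    <vcpu>2</vcpu>
    <os>
      <!-- machine='xenpvh' is what selects LIBXL_DOMAIN_TYPE_PVH in libxlMakeDomCreateInfo() -->
      <type arch='x86_64' machine='xenpvh'>xen</type>
      <kernel>/var/lib/libvirt/images/pvh-vmlinuz</kernel>
      <initrd>/var/lib/libvirt/images/pvh-initrd.img</initrd>
      <cmdline>root=/dev/xvda1 console=hvc0</cmdline>
    </os>
    <devices>
      <disk type='file' device='disk'>
        <source file='/var/lib/libvirt/images/pvh-example.img'/>
        <target dev='xvda' bus='xen'/>
      </disk>
      <interface type='bridge'>
        <source bridge='br0'/>
        <!-- per the libxlMakeNic() hunk, 'netfront' is the only model accepted for PV(H) NICs -->
        <model type='netfront'/>
      </interface>
    </devices>
  </domain>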