Re: [PATCH 2/2] PCI/VMD: Set up firmware-first if capable

Hi Jon,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on pci/next]
[also build test ERROR on v4.19-rc8 next-20181012]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Jon-Derrick/VMD-fixes-for-4-20/20181016-085809
base:   https://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git next
config: x86_64-randconfig-x019-201841 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

All errors (new ones prefixed by >>):

   drivers/pci//controller/vmd.c: In function 'vmd_enable_domain':
>> drivers/pci//controller/vmd.c:743:15: error: 'struct pci_dev' has no member named 'aer_cap'; did you mean 'ats_cap'?
       if (rpdev->aer_cap)
                  ^~~~~~~
                  ats_cap

vim +743 drivers/pci//controller/vmd.c

   581	
   582	static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
   583	{
   584		struct pci_sysdata *sd = &vmd->sysdata;
   585		struct fwnode_handle *fn;
   586		struct resource *res;
   587		u32 upper_bits;
   588		unsigned long flags;
   589		LIST_HEAD(resources);
   590		resource_size_t offset[2] = {0};
   591		resource_size_t membar2_offset = 0x2000, busn_start = 0;
   592		u8 interface;
   593	
   594		/*
   595		 * Shadow registers may exist in certain VMD device ids which allow
   596		 * guests to correctly assign host physical addresses to the root ports
   597		 * and child devices. These registers will either return the host value
   598		 * or 0, depending on an enable bit in the VMD device.
   599		 */
   600		if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
   601			u32 vmlock;
   602			int ret;
   603	
   604			membar2_offset = 0x2018;
   605			ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
   606			if (ret || vmlock == ~0)
   607				return -ENODEV;
   608	
   609			if (MB2_SHADOW_EN(vmlock)) {
   610				void __iomem *membar2;
   611	
   612				membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
   613				if (!membar2)
   614					return -ENOMEM;
   615				offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
   616							readq(membar2 + 0x2008);
   617				offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
   618							readq(membar2 + 0x2010);
   619				pci_iounmap(vmd->dev, membar2);
   620			}
   621		}
   622	
   623		/*
   624		 * Certain VMD devices may have a root port configuration option which
   625		 * limits the bus range to between 0-127 or 128-255
   626		 */
   627		if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
   628			u32 vmcap, vmconfig;
   629	
   630			pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
   631			pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
   632			if (BUS_RESTRICT_CAP(vmcap) &&
   633			    (BUS_RESTRICT_CFG(vmconfig) == 0x1))
   634				busn_start = 128;
   635		}
   636	
   637		res = &vmd->dev->resource[VMD_CFGBAR];
   638		vmd->resources[0] = (struct resource) {
   639			.name  = "VMD CFGBAR",
   640			.start = busn_start,
   641			.end   = busn_start + (resource_size(res) >> 20) - 1,
   642			.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
   643		};
   644	
   645		/*
   646		 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
   647		 * put 32-bit resources in the window.
   648		 *
   649		 * There's no hardware reason why a 64-bit window *couldn't*
   650		 * contain a 32-bit resource, but pbus_size_mem() computes the
   651		 * bridge window size assuming a 64-bit window will contain no
   652		 * 32-bit resources.  __pci_assign_resource() enforces that
   653		 * artificial restriction to make sure everything will fit.
   654		 *
   655		 * The only way we could use a 64-bit non-prefetchable MEMBAR is
   656		 * if its address is <4GB so that we can convert it to a 32-bit
   657		 * resource.  To be visible to the host OS, all VMD endpoints must
   658		 * be initially configured by platform BIOS, which includes setting
   659		 * up these resources.  We can assume the device is configured
   660		 * according to the platform needs.
   661		 */
   662		res = &vmd->dev->resource[VMD_MEMBAR1];
   663		upper_bits = upper_32_bits(res->end);
   664		flags = res->flags & ~IORESOURCE_SIZEALIGN;
   665		if (!upper_bits)
   666			flags &= ~IORESOURCE_MEM_64;
   667		vmd->resources[1] = (struct resource) {
   668			.name  = "VMD MEMBAR1",
   669			.start = res->start,
   670			.end   = res->end,
   671			.flags = flags,
   672			.parent = res,
   673		};
   674	
   675		res = &vmd->dev->resource[VMD_MEMBAR2];
   676		upper_bits = upper_32_bits(res->end);
   677		flags = res->flags & ~IORESOURCE_SIZEALIGN;
   678		if (!upper_bits)
   679			flags &= ~IORESOURCE_MEM_64;
   680		vmd->resources[2] = (struct resource) {
   681			.name  = "VMD MEMBAR2",
   682			.start = res->start + membar2_offset,
   683			.end   = res->end,
   684			.flags = flags,
   685			.parent = res,
   686		};
   687	
   688		sd->vmd_domain = true;
   689		sd->domain = vmd_find_free_domain();
   690		if (sd->domain < 0)
   691			return sd->domain;
   692	
   693		sd->node = pcibus_to_node(vmd->dev->bus);
   694	
   695		fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
   696		if (!fn)
   697			return -ENODEV;
   698	
   699		vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
   700							    x86_vector_domain);
   701		irq_domain_free_fwnode(fn);
   702		if (!vmd->irq_domain)
   703			return -ENODEV;
   704	
   705		pci_add_resource(&resources, &vmd->resources[0]);
   706		pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
   707		pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
   708	
   709		vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops,
   710					       sd, &resources);
   711		if (!vmd->bus) {
   712			pci_free_resource_list(&resources);
   713			irq_domain_remove(vmd->irq_domain);
   714			return -ENODEV;
   715		}
   716	
   717		vmd_attach_resources(vmd);
   718		vmd_setup_dma_ops(vmd);
   719		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
   720		pci_rescan_bus(vmd->bus);
   721	
   722		/*
   723		 * Certain VMD devices may request firmware-first error handling
   724		 * support on the domain. These domains are virtual and not described
   725		 * by ACPI and must be configured manually. VMD domains which utilize
   726		 * firmware-first may still require further kernel error handling, but
   727		 * the domain is intended to first interrupt upon error to system
   728		 * firmware before being passed back to the kernel. The system error
   729		 * handling bits in the root port control register must be enabled
   730		 * following the AER service driver configuration in order to generate
   731		 * these system interrupts.
   732		 *
   733		 * Because the root ports are not described by ACPI and _OSC is
   734		 * unsupported in VMD domains, the intent to use firmware-first error
   735		 * handling in the root ports is instead described by the VMD device's
   736		 * interface bit.
   737		 */
   738		pci_read_config_byte(vmd->dev, PCI_CLASS_PROG, &interface);
   739		if (interface == 0x1) {
   740			struct pci_dev *rpdev;
   741	
   742			list_for_each_entry(rpdev, &vmd->bus->devices, bus_list) {
 > 743				if (rpdev->aer_cap)
   744					pcie_capability_set_word(rpdev, PCI_EXP_RTCTL,
   745								 PCI_EXP_RTCTL_SECEE  |
   746								 PCI_EXP_RTCTL_SENFEE |
   747								 PCI_EXP_RTCTL_SEFEE);
   748			}
   749		}
   750	
   751		WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
   752				       "domain"), "Can't create symlink to domain\n");
   753		return 0;
   754	}
   755	
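
For reference, the error occurs because pci_dev->aer_cap is only present in
struct pci_dev when CONFIG_PCIEAER is enabled, which this randconfig apparently
has disabled. A minimal sketch of one way to drop that dependency (not
necessarily the fix the author intends) is to look up the AER extended
capability directly, since pci_find_ext_capability() is available regardless of
CONFIG_PCIEAER:

	list_for_each_entry(rpdev, &vmd->bus->devices, bus_list) {
		/*
		 * Probe for the AER extended capability instead of reading
		 * pci_dev->aer_cap, which only exists under CONFIG_PCIEAER.
		 */
		if (pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR))
			pcie_capability_set_word(rpdev, PCI_EXP_RTCTL,
						 PCI_EXP_RTCTL_SECEE  |
						 PCI_EXP_RTCTL_SENFEE |
						 PCI_EXP_RTCTL_SEFEE);
	}

Alternatively, the aer_cap test could be wrapped in #ifdef CONFIG_PCIEAER, at
the cost of an ifdef in the driver.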

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip

