Now that pure managed region request functions are available, the
implementation of the hybrid functions which are only sometimes managed can
be made more consistent and readable by wrapping those always-managed
functions.

Implement a new pcim_ function for exclusively requested regions. Have the
pci_request / release functions call their pcim_ counterparts. Remove the
now surplus region_mask from struct pci_devres.

Signed-off-by: Philipp Stanner <pstanner@xxxxxxxxxx>
---
 drivers/pci/devres.c | 49 ++++++++++++++++++++++---------------------
 drivers/pci/pci.c    | 50 +++++++++++++++-----------------------------
 drivers/pci/pci.h    |  6 ------
 include/linux/pci.h  |  1 +
 4 files changed, 43 insertions(+), 63 deletions(-)

diff --git a/drivers/pci/devres.c b/drivers/pci/devres.c
index bd24bad187d9..d9cd7f97c38c 100644
--- a/drivers/pci/devres.c
+++ b/drivers/pci/devres.c
@@ -22,18 +22,15 @@
  * _sometimes_ managed (e.g. pci_request_region()).
  * Consequently, in the new API, region requests performed by the pcim_
  * functions are automatically cleaned up through the devres callback
- * pcim_addr_resource_release(), while requests performed by
- * pcim_enable_device() + pci_*region*() are automatically cleaned up
- * through the for-loop in pcim_release().
+ * pcim_addr_resource_release().
+ * Users utilizing pcim_enable_device() + pci_*region*() are redirected in
+ * pci.c to the managed functions here in this file. This isn't exactly
+ * perfect, but the only alternative would be to port ALL drivers using
+ * said combination to pcim_ functions.
  *
- * TODO 1:
+ * TODO:
  * Remove the legacy table entirely once all calls to pcim_iomap_table() in
  * the kernel have been removed.
- *
- * TODO 2:
- * Port everyone calling pcim_enable_device() + pci_*region*() to using the
- * pcim_ functions. Then, remove all devres functionality from pci_*region*()
- * functions and remove the associated cleanups described above in point #2.
  */
 
 /*
@@ -399,21 +396,6 @@ static void pcim_release(struct device *gendev, void *res)
 {
 	struct pci_dev *dev = to_pci_dev(gendev);
 	struct pci_devres *this = res;
-	int i;
-
-	/*
-	 * This is legacy code.
-	 * All regions requested by a pcim_ function do get released through
-	 * pcim_addr_resource_release(). Thanks to the hybrid nature of the pci_
-	 * region-request functions, this for-loop has to release the regions
-	 * if they have been requested by such a function.
-	 *
-	 * TODO: Remove this once all users of pcim_enable_device() PLUS
-	 * pci-region-request-functions have been ported to pcim_ functions.
-	 */
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
-		if (this->region_mask & (1 << i))
-			pci_release_region(dev, i);
 
 	if (this->mwi)
 		pci_clear_mwi(dev);
@@ -964,6 +946,25 @@ int pcim_request_region(struct pci_dev *pdev, int bar, const char *name)
 }
 EXPORT_SYMBOL(pcim_request_region);
 
+/**
+ * pcim_request_region_exclusive - Request a PCI BAR exclusively
+ * @pdev: PCI device to request region for
+ * @bar: Index of BAR to request
+ * @name: Name associated with the request
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ *
+ * Request region specified by @bar exclusively.
+ *
+ * The region will automatically be released on driver detach. If desired,
+ * release manually only with pcim_release_region().
+ */
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name)
+{
+	return _pcim_request_region(pdev, bar, name, IORESOURCE_EXCLUSIVE);
+}
+EXPORT_SYMBOL(pcim_request_region_exclusive);
+
 /**
  * pcim_release_region - Release a PCI BAR
  * @pdev: PCI device to operate on
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 9d9d09534efe..c0c1ee17a06b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3869,7 +3869,15 @@ EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
  */
 void pci_release_region(struct pci_dev *pdev, int bar)
 {
-	struct pci_devres *dr;
+	/*
+	 * This is done for backwards compatibility, because the old pci-devres
+	 * API had a mode in which the function became managed if it had been
+	 * enabled with pcim_enable_device() instead of pci_enable_device().
+	 */
+	if (pci_is_managed(pdev)) {
+		pcim_release_region(pdev, bar);
+		return;
+	}
 
 	if (pci_resource_len(pdev, bar) == 0)
 		return;
@@ -3879,20 +3887,6 @@ void pci_release_region(struct pci_dev *pdev, int bar)
 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
 		release_mem_region(pci_resource_start(pdev, bar),
 				pci_resource_len(pdev, bar));
-
-	/*
-	 * This devres utility makes this function sometimes managed
-	 * (when pcim_enable_device() has been called before).
-	 * This is bad because it conflicts with the pcim_ functions being
-	 * exclusively responsible for managed pci. Its "sometimes yes, sometimes
-	 * no" nature can cause bugs.
-	 *
-	 * TODO: Remove this once all users that use pcim_enable_device() PLUS
-	 * a region request function have been ported to using pcim_ functions.
-	 */
-	dr = find_pci_dr(pdev);
-	if (dr)
-		dr->region_mask &= ~(1 << bar);
 }
 EXPORT_SYMBOL(pci_release_region);
 
@@ -3920,14 +3914,18 @@ EXPORT_SYMBOL(pci_release_region);
  * NOTE:
  * This is a "hybrid" function: Its normally unmanaged, but becomes managed
  * when pcim_enable_device() has been called in advance.
- * This hybrid feature is DEPRECATED! If you need to implement a new pci
- * function that does automatic cleanup, write a new pcim_* function that uses
- * devres directly.
+ * This hybrid feature is DEPRECATED! If you want managed cleanup, use the
+ * pcim_* functions instead.
  */
 static int __pci_request_region(struct pci_dev *pdev, int bar,
 				const char *res_name, int exclusive)
 {
-	struct pci_devres *dr;
+	if (pci_is_managed(pdev)) {
+		if (exclusive == IORESOURCE_EXCLUSIVE)
+			return pcim_request_region_exclusive(pdev, bar, res_name);
+
+		return pcim_request_region(pdev, bar, res_name);
+	}
 
 	if (pci_resource_len(pdev, bar) == 0)
 		return 0;
@@ -3943,20 +3941,6 @@ static int __pci_request_region(struct pci_dev *pdev, int bar,
 			goto err_out;
 	}
 
-	/*
-	 * This devres utility makes this function sometimes managed
-	 * (when pcim_enable_device() has been called before).
-	 * This is bad because it conflicts with the pcim_ functions being
-	 * exclusively responsible for managed pci. Its "sometimes yes, sometimes
-	 * no" nature can cause bugs.
-	 *
-	 * TODO: Remove this once all users that use pcim_enable_device() PLUS
-	 * a region request function have been ported to using pcim_ functions.
-	 */
-	dr = find_pci_dr(pdev);
-	if (dr)
-		dr->region_mask |= 1 << bar;
-
 	return 0;
 
 err_out:
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 171884aba8e1..040ed2825554 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -828,12 +828,6 @@ struct pci_devres {
 	unsigned int orig_intx:1;
 	unsigned int restore_intx:1;
 	unsigned int mwi:1;
-
-	/*
-	 * TODO: remove the region_mask once everyone calling
-	 * pcim_enable_device() + pci_*region*() is ported to pcim_ functions.
-	 */
-	u32 region_mask;
 };
 
 struct pci_devres *find_pci_dr(struct pci_dev *pdev);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5782ad034178..0f203338f820 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -2330,6 +2330,7 @@ int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
 				   const char *name);
 void pcim_iounmap_regions(struct pci_dev *pdev, int mask);
 int pcim_request_region(struct pci_dev *pdev, int bar, const char *res_name);
+int pcim_request_region_exclusive(struct pci_dev *pdev, int bar, const char *name);
 void pcim_release_region(struct pci_dev *pdev, int bar);
 void pcim_release_all_regions(struct pci_dev *pdev);
 int pcim_request_all_regions(struct pci_dev *pdev, const char *name);
-- 
2.44.0
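For illustration, here is a minimal usage sketch of the new API from a
driver's point of view. It is not part of the patch; the probe function
name and the FOO_BAR index are hypothetical placeholders, only the pcim_*
calls come from the series:

#include <linux/module.h>
#include <linux/pci.h>

#define FOO_BAR 0	/* hypothetical BAR index requested by this example */

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Managed enable: the device is disabled again on driver detach. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * Exclusive, managed region request through the new wrapper. No
	 * explicit pci_release_region() is needed in error or remove paths;
	 * devres releases the BAR on detach, or it can be dropped earlier
	 * with pcim_release_region().
	 */
	ret = pcim_request_region_exclusive(pdev, FOO_BAR, KBUILD_MODNAME);
	if (ret)
		return ret;

	return 0;
}

A legacy driver that instead combines pcim_enable_device() with plain
pci_request_region() keeps its managed cleanup as well, since
__pci_request_region() now redirects to pcim_request_region() whenever
pci_is_managed() is true.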