Hi Hans,

On Tue, May 16, 2023 at 10:32 AM Hans de Goede <hdegoede@xxxxxxxxxx> wrote:
>
> Hi Rafael,
>
> On 5/15/23 18:30, Rafael J. Wysocki wrote:
> > From: Rafael J. Wysocki <rafael.j.wysocki@xxxxxxxxx>
> >
> > Notice that all of the objects for which the acpi_scan_check_dep()
> > return value is greater than 0 are present in acpi_dep_list as consumers
> > (there may be multiple entries for one object, but that is not a
> > problem), so after carrying out the initial ACPI namespace walk in which
> > devices with dependencies are skipped, acpi_bus_scan() can simply walk
> > acpi_dep_list and enumerate all of the unique consumer objects from
> > there and their descendants instead of walking the entire target branch
> > of the ACPI namespace and looking for device objects that have not been
> > enumerated yet in it.
> >
> > Because walking acpi_dep_list generally involves less overhead than
> > walking the entire ACPI namespace, use the observation above to reduce
> > the system initialization overhead related to ACPI, which is particularly
> > important on large systems.
> >
> > Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@xxxxxxxxx>
> > ---
> >  drivers/acpi/scan.c     |   79 +++++++++++++++++++++++++++++++++++-------------
> >  include/acpi/acpi_bus.h |    2 +
> >  2 files changed, 61 insertions(+), 20 deletions(-)
> >
> > Index: linux-pm/include/acpi/acpi_bus.h
> > ===================================================================
> > --- linux-pm.orig/include/acpi/acpi_bus.h
> > +++ linux-pm/include/acpi/acpi_bus.h
> > @@ -289,6 +289,8 @@ struct acpi_dep_data {
> >  	acpi_handle supplier;
> >  	acpi_handle consumer;
> >  	bool honor_dep;
> > +	bool met;
> > +	bool free_when_met;
> >  };
> >
> >  /* Performance Management */
> > Index: linux-pm/drivers/acpi/scan.c
> > ===================================================================
> > --- linux-pm.orig/drivers/acpi/scan.c
> > +++ linux-pm/drivers/acpi/scan.c
> > @@ -2029,8 +2029,6 @@ static u32 acpi_scan_check_dep(acpi_hand
> >  	return count;
> >  }
> >
> > -static bool acpi_bus_scan_second_pass;
> > -
> >  static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
> >  				      struct acpi_device **adev_p)
> >  {
> > @@ -2050,10 +2048,8 @@ static acpi_status acpi_bus_check_add(ac
> >  			return AE_OK;
> >
> >  		/* Bail out if there are dependencies. */
> > -		if (acpi_scan_check_dep(handle, check_dep) > 0) {
> > -			acpi_bus_scan_second_pass = true;
> > +		if (acpi_scan_check_dep(handle, check_dep) > 0)
> >  			return AE_CTRL_DEPTH;
> > -		}
> >
> >  		fallthrough;
> >  	case ACPI_TYPE_ANY:	/* for ACPI_ROOT_OBJECT */
> > @@ -2301,6 +2297,12 @@ static bool acpi_scan_clear_dep_queue(st
> >  	return true;
> >  }
> >
> > +static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep)
> > +{
> > +	list_del(&dep->node);
> > +	kfree(dep);
> > +}
> > +
> >  static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
> >  {
> >  	struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
> > @@ -2311,8 +2313,10 @@ static int acpi_scan_clear_dep(struct ac
> >  		acpi_dev_put(adev);
> >  	}
> >
> > -	list_del(&dep->node);
> > -	kfree(dep);
> > +	if (dep->free_when_met)
> > +		acpi_scan_delete_dep_data(dep);
> > +	else
> > +		dep->met = true;
> >
> >  	return 0;
> >  }
> > @@ -2406,6 +2410,53 @@ struct acpi_device *acpi_dev_get_next_co
> >  }
> >  EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
> >
> > +static void acpi_scan_postponed_branch(acpi_handle handle)
> > +{
> > +	struct acpi_device *adev = NULL;
> > +
> > +	if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev)))
> > +		return;
> > +
> > +	acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
> > +			    acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
> > +	acpi_bus_attach(adev, NULL);
> > +}
> > +
> > +static void acpi_scan_postponed(void)
> > +{
> > +	struct acpi_dep_data *dep, *tmp;
> > +
> > +	mutex_lock(&acpi_dep_list_lock);
> > +
> > +	list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
> > +		acpi_handle handle = dep->consumer;
> > +
> > +		/*
> > +		 * Even though the lock is released here, tmp is guaranteed to
> > +		 * be valid, because none of the list entries following dep is
> > +		 * marked as "free when met" and so they cannot be deleted.
> > +		 */
> > +		mutex_unlock(&acpi_dep_list_lock);
> > +
> > +		/*
> > +		 * In case there are multiple acpi_dep_list entries with the
> > +		 * same consumer, skip the current entry if the consumer device
> > +		 * object corresponding to it is present already.
> > +		 */
> > +		if (!acpi_fetch_acpi_dev(handle))
> > +			acpi_scan_postponed_branch(handle);
>
> acpi_fetch_acpi_dev(handle) does not need/take the acpi_dep_list_lock,
> so you can avoid a needless unlock/lock in case acpi_fetch_acpi_dev(handle)
> finds a device already, which will happen quite regularly since devices
> with _DEP lists regularly have more than 1 dep, so they will be present
> as consumer on the _DEP list more than once.
>
> So maybe:
>
> 	list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
> 		acpi_handle handle = dep->consumer;
> 		struct acpi_device *device = acpi_fetch_acpi_dev(handle);
>
> 		/*
> 		 * In case there are multiple acpi_dep_list entries with the
> 		 * same consumer, skip scanning the current entry if the
> 		 * consumer device object corresponding to it is present already.
> 		 */
> 		if (device)
> 			goto check_dep;
>
> 		/*
> 		 * Even though the lock is released here, tmp is guaranteed to
> 		 * be valid, because none of the list entries following dep is
> 		 * marked as "free when met" and so they cannot be deleted.
> 		 */
> 		mutex_unlock(&acpi_dep_list_lock);
> 		acpi_scan_postponed_branch(handle);
> 		mutex_lock(&acpi_dep_list_lock);
>
> check_dep:
> 		if (dep->met)
> 			acpi_scan_delete_dep_data(dep);
> 		else
> 			dep->free_when_met = true;
> 	}
>
> ?

Thanks for the suggestion, sounds good.

I'll send a v2 modified along these lines shortly.
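
FWIW, with the list walk in place, the second pass in acpi_bus_scan() simply
becomes a call to the new helper. Roughly like this (a sketch of the intent
only, assuming the existing acpi_bus_check_add_1() first-pass callback stays
as it is; the exact v2 diff may differ):

	int acpi_bus_scan(acpi_handle handle)
	{
		struct acpi_device *device = NULL;

		/* Pass 1: Avoid enumerating devices with missing dependencies. */
		if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
			acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
					    acpi_bus_check_add_1, NULL, NULL,
					    (void **)&device);

		if (!device)
			return -ENODEV;

		acpi_bus_attach(device, (void *)true);

		/*
		 * Pass 2: Instead of walking the whole target branch again,
		 * enumerate only the consumers recorded in acpi_dep_list.
		 */
		acpi_scan_postponed();

		return 0;
	}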