FROMGIT: driver core: Call sync_state() even if supplier has no consumers

The initial patch that added sync_state() support didn't handle the case
where a supplier has no consumers. This was because when a device is
successfully bound with a driver, only its suppliers were checked to see
if they are eligible to get a sync_state(). This is not sufficient for
devices that have no consumers but still need to do device state clean
up. So fix this.

Fixes: fc5a251d0fd7ca90 ("driver core: Add sync_state driver/bus callback")
Signed-off-by: Saravana Kannan <saravanak@google.com>
Cc: stable <stable@vger.kernel.org>
Link: https://lore.kernel.org/r/20200221080510.197337-2-saravanak@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 21eb93f432b1a785df193df1a56a59e9eb3a985f
https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git driver-core-linus)
Bug: 150980623
Change-Id: I9bebc164f00d7797501f40080c12a04dbe3095b1
(cherry picked from commit a27e0934dec04ca9878e9cb7f99206bbb75f1f4d)
This commit is contained in:
Saravana Kannan
2020-02-21 00:05:08 -08:00
parent 8ca1cb498b
commit ab4eba9a90

View File

@@ -638,25 +638,31 @@ static void __device_links_queue_sync_state(struct device *dev,
/**
* device_links_flush_sync_list - Call sync_state() on a list of devices
* @list: List of devices to call sync_state() on
* @dont_lock_dev: Device for which lock is already held by the caller
*
* Calls sync_state() on all the devices that have been queued for it. This
* function is used in conjunction with __device_links_queue_sync_state().
* function is used in conjunction with __device_links_queue_sync_state(). The
* @dont_lock_dev parameter is useful when this function is called from a
* context where a device lock is already held.
*/
static void device_links_flush_sync_list(struct list_head *list)
static void device_links_flush_sync_list(struct list_head *list,
struct device *dont_lock_dev)
{
struct device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
device_lock(dev);
if (dev != dont_lock_dev)
device_lock(dev);
if (dev->bus->sync_state)
dev->bus->sync_state(dev);
else if (dev->driver && dev->driver->sync_state)
dev->driver->sync_state(dev);
device_unlock(dev);
if (dev != dont_lock_dev)
device_unlock(dev);
put_device(dev);
}
@@ -694,7 +700,7 @@ void device_links_supplier_sync_state_resume(void)
out:
device_links_write_unlock();
device_links_flush_sync_list(&sync_list);
device_links_flush_sync_list(&sync_list, NULL);
}
static int sync_state_resume_initcall(void)
@@ -745,6 +751,11 @@ void device_links_driver_bound(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
}
if (defer_sync_state_count)
__device_links_supplier_defer_sync(dev);
else
__device_links_queue_sync_state(dev, &sync_list);
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (link->flags & DL_FLAG_STATELESS)
continue;
@@ -763,7 +774,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_unlock();
device_links_flush_sync_list(&sync_list);
device_links_flush_sync_list(&sync_list, dev);
}
/**