Merge branch 'pm-domains' into for-linus

* pm-domains: (33 commits)
  ARM / shmobile: Return -EBUSY from A4LC power off if A3RV is active
  PM / Domains: Take .power_off() error code into account
  ARM / shmobile: Use genpd_queue_power_off_work()
  ARM / shmobile: Use pm_genpd_poweroff_unused()
  PM / Domains: Introduce function to power off all unused PM domains
  PM / Domains: Queue up power off work only if it is not pending
  PM / Domains: Improve handling of wakeup devices during system suspend
  PM / Domains: Do not restore all devices on power off error
  PM / Domains: Allow callbacks to execute all runtime PM helpers
  PM / Domains: Do not execute device callbacks under locks
  PM / Domains: Make failing pm_genpd_prepare() clean up properly
  PM / Domains: Set device state to "active" during system resume
  ARM: mach-shmobile: sh7372 A3RV requires A4LC
  PM / Domains: Export pm_genpd_poweron() in header
  ARM: mach-shmobile: sh7372 late pm domain off
  ARM: mach-shmobile: Runtime PM late init callback
  ARM: mach-shmobile: sh7372 D4 support
  ARM: mach-shmobile: sh7372 A4MP support
  ARM: mach-shmobile: sh7372: make sure that fsi is peripheral of spu2
  ARM: mach-shmobile: sh7372 A3SG support
  ...
Committed by Rafael J. Wysocki on 2011-07-15 23:59:09 +02:00
24 changed files with 1937 additions and 171 deletions
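Several of the shmobile commits in the list above switch the platform to the new helper that powers down every PM domain left without active devices once driver probing is done. A minimal sketch of how a platform might hook that up, assuming the pm_genpd_poweroff_unused() interface named in the commit subjects (the foo_* init hook and header placement are assumptions):

#include <linux/init.h>
#include <linux/pm_domain.h>

/* Hypothetical platform hook: once all drivers have had a chance to
 * probe, power off every generic PM domain that has no active users. */
static int __init foo_pm_late_init(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(foo_pm_late_init);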

drivers/base/power/Makefile

@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME) += runtime.o
 obj-$(CONFIG_PM_TRACE_RTC) += trace.o
 obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
 obj-$(CONFIG_HAVE_CLK) += clock_ops.o
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
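The new domain.o object is only built when CONFIG_PM_GENERIC_DOMAINS is enabled, so a platform that wants the generic domains code has to select the symbol from its Kconfig. A hypothetical example (ARCH_FOO is made up):

config ARCH_FOO
	bool "Hypothetical SoC with power domains"
	select PM_GENERIC_DOMAINS if PM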

drivers/base/power/clock_ops.c

@@ -15,9 +15,9 @@
 #include <linux/slab.h>
 #include <linux/err.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
-struct pm_runtime_clk_data {
+struct pm_clk_data {
 	struct list_head clock_list;
 	struct mutex lock;
 };
@@ -36,25 +36,25 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+static struct pm_clk_data *__to_pcd(struct device *dev)
 {
 	return dev ? dev->power.subsys_data : NULL;
 }
 /**
- * pm_runtime_clk_add - Start using a device clock for runtime PM.
- * @dev: Device whose clock is going to be used for runtime PM.
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
  * @con_id: Connection ID of the clock.
  *
  * Add the clock represented by @con_id to the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
  */
-int pm_runtime_clk_add(struct device *dev, const char *con_id)
+int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
-	if (!prd)
+	if (!pcd)
 		return -EINVAL;
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
 		}
 	}
-	mutex_lock(&prd->lock);
-	list_add_tail(&ce->node, &prd->clock_list);
-	mutex_unlock(&prd->lock);
+	mutex_lock(&pcd->lock);
+	list_add_tail(&ce->node, &pcd->clock_list);
+	mutex_unlock(&pcd->lock);
 	return 0;
 }
 /**
- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
- * @ce: Runtime PM clock entry to destroy.
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
  *
- * This routine must be called under the mutex protecting the runtime PM list
- * of clocks corresponding the the @ce's device.
+ * This routine must be called under the mutex protecting the PM list of clocks
+ * corresponding the the @ce's device.
  */
-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
 	if (!ce)
 		return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
 }
 /**
- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
- * @dev: Device whose clock should not be used for runtime PM any more.
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
  * @con_id: Connection ID of the clock.
  *
  * Remove the clock represented by @con_id from the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
  */
-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
-	if (!prd)
+	if (!pcd)
 		return;
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
-	list_for_each_entry(ce, &prd->clock_list, node) {
+	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (!con_id && !ce->con_id) {
-			__pm_runtime_clk_remove(ce);
+			__pm_clk_remove(ce);
 			break;
 		} else if (!con_id || !ce->con_id) {
 			continue;
 		} else if (!strcmp(con_id, ce->con_id)) {
-			__pm_runtime_clk_remove(ce);
+			__pm_clk_remove(ce);
 			break;
 		}
 	}
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 }
 /**
- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
- * @dev: Device to initialize the list of runtime PM clocks for.
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * Allocate a struct pm_clk_data object, initialize its lock member and
  * make the @dev's power.subsys_data field point to it.
  */
-int pm_runtime_clk_init(struct device *dev)
+int pm_clk_init(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd;
+	struct pm_clk_data *pcd;
-	prd = kzalloc(sizeof(*prd), GFP_KERNEL);
-	if (!prd) {
-		dev_err(dev, "Not enough memory fo runtime PM data.\n");
+	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+	if (!pcd) {
+		dev_err(dev, "Not enough memory for PM clock data.\n");
 		return -ENOMEM;
 	}
-	INIT_LIST_HEAD(&prd->clock_list);
-	mutex_init(&prd->lock);
-	dev->power.subsys_data = prd;
+	INIT_LIST_HEAD(&pcd->clock_list);
+	mutex_init(&pcd->lock);
+	dev->power.subsys_data = pcd;
 	return 0;
 }
 /**
- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
- * @dev: Device to destroy the list of runtime PM clocks for.
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
  *
 * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * from the struct pm_clk_data object pointed to by it before and free
  * that object.
  */
-void pm_runtime_clk_destroy(struct device *dev)
+void pm_clk_destroy(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce, *c;
-	if (!prd)
+	if (!pcd)
 		return;
 	dev->power.subsys_data = NULL;
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
-	list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
-		__pm_runtime_clk_remove(ce);
+	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+		__pm_clk_remove(ce);
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
-	kfree(prd);
+	kfree(pcd);
 }
+#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
 /**
- * pm_runtime_clk_acquire - Acquire a device clock.
+ * pm_clk_acquire - Acquire a device clock.
  * @dev: Device whose clock is to be acquired.
  * @con_id: Connection ID of the clock.
  */
-static void pm_runtime_clk_acquire(struct device *dev,
+static void pm_clk_acquire(struct device *dev,
 			    struct pm_clock_entry *ce)
 {
 	ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
 }
 /**
- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
  * @dev: Device to disable the clocks for.
  */
-int pm_runtime_clk_suspend(struct device *dev)
+int pm_clk_suspend(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 	dev_dbg(dev, "%s()\n", __func__);
-	if (!prd)
+	if (!pcd)
 		return 0;
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
-	list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
-			pm_runtime_clk_acquire(dev, ce);
+			pm_clk_acquire(dev, ce);
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
 		}
 	}
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 	return 0;
 }
 /**
- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
  * @dev: Device to enable the clocks for.
  */
-int pm_runtime_clk_resume(struct device *dev)
+int pm_clk_resume(struct device *dev)
 {
-	struct pm_runtime_clk_data *prd = __to_prd(dev);
+	struct pm_clk_data *pcd = __to_pcd(dev);
 	struct pm_clock_entry *ce;
 	dev_dbg(dev, "%s()\n", __func__);
-	if (!prd)
+	if (!pcd)
 		return 0;
-	mutex_lock(&prd->lock);
+	mutex_lock(&pcd->lock);
-	list_for_each_entry(ce, &prd->clock_list, node) {
+	list_for_each_entry(ce, &pcd->clock_list, node) {
 		if (ce->status == PCE_STATUS_NONE)
-			pm_runtime_clk_acquire(dev, ce);
+			pm_clk_acquire(dev, ce);
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
@@ -265,28 +269,28 @@ int pm_runtime_clk_resume(struct device *dev)
 		}
 	}
-	mutex_unlock(&prd->lock);
+	mutex_unlock(&pcd->lock);
 	return 0;
 }
 /**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
  * @nb: Notifier block object this function is a member of.
  * @action: Operation being carried out by the caller.
  * @data: Device the routine is being run for.
  *
 * For this function to work, @nb must be a member of an object of type
 * struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pwr_domain member of that object is copied to the device's
- * pwr_domain field and its con_ids member is used to populate the device's list
- * of runtime PM clocks, depending on @action.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
  *
- * If the device's pwr_domain field is already populated with a value different
+ * If the device's pm_domain field is already populated with a value different
 * from the one stored in the struct pm_clk_notifier_block object, the function
 * does nothing.
  */
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
 				 unsigned long action, void *data)
 {
 	struct pm_clk_notifier_block *clknb;
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 	switch (action) {
 	case BUS_NOTIFY_ADD_DEVICE:
-		if (dev->pwr_domain)
+		if (dev->pm_domain)
 			break;
-		error = pm_runtime_clk_init(dev);
+		error = pm_clk_init(dev);
 		if (error)
 			break;
-		dev->pwr_domain = clknb->pwr_domain;
+		dev->pm_domain = clknb->pm_domain;
 		if (clknb->con_ids[0]) {
 			for (con_id = clknb->con_ids; *con_id; con_id++)
-				pm_runtime_clk_add(dev, *con_id);
+				pm_clk_add(dev, *con_id);
 		} else {
-			pm_runtime_clk_add(dev, NULL);
+			pm_clk_add(dev, NULL);
 		}
 		break;
 	case BUS_NOTIFY_DEL_DEVICE:
-		if (dev->pwr_domain != clknb->pwr_domain)
+		if (dev->pm_domain != clknb->pm_domain)
 			break;
-		dev->pwr_domain = NULL;
-		pm_runtime_clk_destroy(dev);
+		dev->pm_domain = NULL;
+		pm_clk_destroy(dev);
 		break;
 	}
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 #else /* !CONFIG_PM_RUNTIME */
+#ifdef CONFIG_PM
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_clock_entry *ce;
+	dev_dbg(dev, "%s()\n", __func__);
+	/* If there is no driver, the clocks are already disabled. */
+	if (!pcd || !dev->driver)
+		return 0;
+	mutex_lock(&pcd->lock);
+	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+		clk_disable(ce->clk);
+	mutex_unlock(&pcd->lock);
+	return 0;
+}
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_clock_entry *ce;
+	dev_dbg(dev, "%s()\n", __func__);
+	/* If there is no driver, the clocks should remain disabled. */
+	if (!pcd || !dev->driver)
+		return 0;
+	mutex_lock(&pcd->lock);
+	list_for_each_entry(ce, &pcd->clock_list, node)
+		clk_enable(ce->clk);
+	mutex_unlock(&pcd->lock);
+	return 0;
+}
+#endif /* CONFIG_PM */
 /**
 * enable_clock - Enable a device clock.
 * @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
 }
 /**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
 * @nb: Notifier block object this function is a member of.
 * @action: Operation being carried out by the caller.
 * @data: Device the routine is being run for.
@@ -375,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
 * Specifically, the con_ids member of that object is used to enable or disable
 * the device's clocks, depending on @action.
 */
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
 				 unsigned long action, void *data)
 {
 	struct pm_clk_notifier_block *clknb;
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 #endif /* !CONFIG_PM_RUNTIME */
 /**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
 * @bus: Bus type to add the notifier to.
 * @clknb: Notifier to be added to the given bus type.
 *
 * The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * notifier_call member will be replaced with pm_clk_notify(). However,
 * the remaining members of @clknb should be populated prior to calling this
 * routine.
 */
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
 				 struct pm_clk_notifier_block *clknb)
 {
 	if (!bus || !clknb)
 		return;
-	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	clknb->nb.notifier_call = pm_clk_notify;
 	bus_register_notifier(bus, &clknb->nb);
 }
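The kernel-doc above describes how a bus type hands per-device clock handling to these helpers through a bus notifier. A rough sketch of the registration side, under the assumption that struct pm_clk_notifier_block exposes the pm_domain and con_ids members used by pm_clk_notify() above (the foo_* names and the "fck" connection ID are made up):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical PM domain whose runtime PM callbacks simply gate the
 * clocks registered for each device via the pm_clk_* helpers above. */
static struct dev_pm_domain foo_pm_domain = {
	.ops = {
		.runtime_suspend = pm_clk_suspend,
		.runtime_resume = pm_clk_resume,
	},
};

/* Manage the "fck" clock of every device that appears on the platform
 * bus, and attach foo_pm_domain to those devices. */
static struct pm_clk_notifier_block foo_clk_notifier = {
	.pm_domain = &foo_pm_domain,
	.con_ids = { "fck", NULL },
};

static int __init foo_pm_init(void)
{
	pm_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
	return 0;
}
core_initcall(foo_pm_init);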

drivers/base/power/domain.c (new file, 1273 lines added)

Diff suppressed because it is too large.
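Since the diff of the new domain.c is not shown, here is a rough sketch of how a platform is expected to describe a hardware power domain with it, based on the interfaces referenced elsewhere in this series (pm_genpd_init(), pm_genpd_add_device(), and the power_on/power_off callbacks); the exact signatures are assumptions and the foo_* names are made up:

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Called by the generic PM domain code when the last device in the
 * domain has been suspended; a nonzero return keeps the domain on. */
static int foo_domain_power_off(struct generic_pm_domain *genpd)
{
	/* Cut power to the hardware island here. */
	return 0;
}

static int foo_domain_power_on(struct generic_pm_domain *genpd)
{
	/* Restore power before devices in the domain are resumed. */
	return 0;
}

static struct generic_pm_domain foo_domain = {
	.power_off = foo_domain_power_off,
	.power_on = foo_domain_power_on,
};

static void __init foo_pm_domain_setup(struct platform_device *pdev)
{
	/* Register the domain once, initially powered on, ... */
	pm_genpd_init(&foo_domain, NULL, false);
	/* ...then attach a device so its runtime PM state drives the domain. */
	pm_genpd_add_device(&foo_domain, &pdev->dev);
}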

drivers/base/power/generic_ops.c

@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
+ * @bool: Whether or not this is the "noirq" stage.
 *
 * If the device has not been suspended at run time, execute the
 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
 * return its error code. Otherwise, return zero.
 */
-static int __pm_generic_call(struct device *dev, int event)
+static int __pm_generic_call(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
 	switch (event) {
 	case PM_EVENT_SUSPEND:
-		callback = pm->suspend;
+		callback = noirq ? pm->suspend_noirq : pm->suspend;
 		break;
 	case PM_EVENT_FREEZE:
-		callback = pm->freeze;
+		callback = noirq ? pm->freeze_noirq : pm->freeze;
 		break;
 	case PM_EVENT_HIBERNATE:
-		callback = pm->poweroff;
+		callback = noirq ? pm->poweroff_noirq : pm->poweroff;
 		break;
 	case PM_EVENT_THAW:
-		callback = pm->thaw;
+		callback = noirq ? pm->thaw_noirq : pm->thaw;
 		break;
 	default:
 		callback = NULL;
@@ -128,43 +129,83 @@ static int __pm_generic_call(struct device *dev, int event)
 	return callback ? callback(dev) : 0;
 }
 /**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+/**
 * pm_generic_suspend - Generic suspend callback for subsystems.
 * @dev: Device to suspend.
 */
 int pm_generic_suspend(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+	return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);
 /**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+/**
 * pm_generic_freeze - Generic freeze callback for subsystems.
 * @dev: Device to freeze.
 */
 int pm_generic_freeze(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_FREEZE);
+	return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);
 /**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+/**
 * pm_generic_poweroff - Generic poweroff callback for subsystems.
 * @dev: Device to handle.
 */
 int pm_generic_poweroff(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+	return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);
 /**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+	return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+/**
 * pm_generic_thaw - Generic thaw callback for subsystems.
 * @dev: Device to thaw.
 */
 int pm_generic_thaw(struct device *dev)
 {
-	return __pm_generic_call(dev, PM_EVENT_THAW);
+	return __pm_generic_call(dev, PM_EVENT_THAW, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
 * __pm_generic_resume - Generic resume/restore callback for subsystems.
 * @dev: Device to handle.
 * @event: PM transition of the system under way.
+ * @bool: Whether or not this is the "noirq" stage.
 *
 * Execute the resume/resotre callback provided by the @dev's driver, if
 * defined. If it returns 0, change the device's runtime PM status to 'active'.
 * Return the callback's error code.
 */
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
 	switch (event) {
 	case PM_EVENT_RESUME:
-		callback = pm->resume;
+		callback = noirq ? pm->resume_noirq : pm->resume;
 		break;
 	case PM_EVENT_RESTORE:
-		callback = pm->restore;
+		callback = noirq ? pm->restore_noirq : pm->restore;
 		break;
 	default:
 		callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
 		return 0;
 	ret = callback(dev);
-	if (!ret && pm_runtime_enabled(dev)) {
+	if (!ret && !noirq && pm_runtime_enabled(dev)) {
 		pm_runtime_disable(dev);
 		pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
@@ -211,23 +253,43 @@ static int __pm_generic_resume(struct device *dev, int event)
 	return ret;
 }
 /**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+	return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+/**
 * pm_generic_resume - Generic resume callback for subsystems.
 * @dev: Device to resume.
 */
 int pm_generic_resume(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESUME);
+	return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);
 /**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+	return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+/**
 * pm_generic_restore - Generic restore callback for subsystems.
 * @dev: Device to restore.
 */
 int pm_generic_restore(struct device *dev)
 {
-	return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+	return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
 #ifdef CONFIG_PM_SLEEP
 	.prepare = pm_generic_prepare,
 	.suspend = pm_generic_suspend,
+	.suspend_noirq = pm_generic_suspend_noirq,
 	.resume = pm_generic_resume,
+	.resume_noirq = pm_generic_resume_noirq,
 	.freeze = pm_generic_freeze,
+	.freeze_noirq = pm_generic_freeze_noirq,
 	.thaw = pm_generic_thaw,
+	.thaw_noirq = pm_generic_thaw_noirq,
 	.poweroff = pm_generic_poweroff,
+	.poweroff_noirq = pm_generic_poweroff_noirq,
 	.restore = pm_generic_restore,
+	.restore_noirq = pm_generic_restore_noirq,
 	.complete = pm_generic_complete,
 #endif
 #ifdef CONFIG_PM_RUNTIME
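With the generic _noirq helpers above, a subsystem built on these callbacks now forwards the late (interrupts-disabled) suspend and early resume phases to its drivers as well. A minimal sketch of a driver-side dev_pm_ops that takes advantage of that (the foo_* callbacks are hypothetical):

#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* Ordinary suspend work, interrupts still enabled. */
	return 0;
}

static int foo_suspend_noirq(struct device *dev)
{
	/* Final quiescing after interrupts are disabled; reached through
	 * pm_generic_suspend_noirq() in the subsystem's dev_pm_ops. */
	return 0;
}

static int foo_resume_noirq(struct device *dev)
{
	/* Early wake-up work while interrupts are still disabled. */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Normal resume work once interrupts are back on. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.suspend_noirq = foo_suspend_noirq,
	.resume_noirq = foo_resume_noirq,
	.resume = foo_resume,
};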

drivers/base/power/main.c

@@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "EARLY power domain ");
-		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
 	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "EARLY type ");
 		error = pm_noirq_op(dev, dev->type->pm, state);
@@ -521,9 +521,9 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	if (!dev->power.is_suspended)
 		goto Unlock;
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pm_domain->ops, state);
 		goto End;
 	}
@@ -641,10 +641,10 @@ static void device_complete(struct device *dev, pm_message_t state)
 {
 	device_lock(dev);
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "completing power domain ");
-		if (dev->pwr_domain->ops.complete)
-			dev->pwr_domain->ops.complete(dev);
+		if (dev->pm_domain->ops.complete)
+			dev->pm_domain->ops.complete(dev);
 	} else if (dev->type && dev->type->pm) {
 		pm_dev_dbg(dev, state, "completing type ");
 		if (dev->type->pm->complete)
@@ -744,9 +744,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
 	int error;
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "LATE power domain ");
-		error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
 		if (error)
 			return error;
 	} else if (dev->type && dev->type->pm) {
@@ -853,9 +853,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		goto Unlock;
 	}
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "power domain ");
-		error = pm_op(dev, &dev->pwr_domain->ops, state);
+		error = pm_op(dev, &dev->pm_domain->ops, state);
 		goto End;
 	}
@@ -982,11 +982,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
 	device_lock(dev);
-	if (dev->pwr_domain) {
+	if (dev->pm_domain) {
 		pm_dev_dbg(dev, state, "preparing power domain ");
-		if (dev->pwr_domain->ops.prepare)
-			error = dev->pwr_domain->ops.prepare(dev);
-		suspend_report_result(dev->pwr_domain->ops.prepare, error);
+		if (dev->pm_domain->ops.prepare)
+			error = dev->pm_domain->ops.prepare(dev);
+		suspend_report_result(dev->pm_domain->ops.prepare, error);
 		if (error)
 			goto End;
 	} else if (dev->type && dev->type->pm) {

drivers/base/power/runtime.c

@@ -213,8 +213,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
 	dev->power.idle_notification = true;
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_idle;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_idle;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_idle;
 	else if (dev->class && dev->class->pm)
@@ -374,8 +374,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_SUSPENDING);
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_suspend;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_suspend;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_suspend;
 	else if (dev->class && dev->class->pm)
@@ -573,8 +573,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
 	__update_runtime_status(dev, RPM_RESUMING);
-	if (dev->pwr_domain)
-		callback = dev->pwr_domain->ops.runtime_resume;
+	if (dev->pm_domain)
+		callback = dev->pm_domain->ops.runtime_resume;
 	else if (dev->type && dev->type->pm)
 		callback = dev->type->pm->runtime_resume;
 	else if (dev->class && dev->class->pm)
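Every hunk in main.c and runtime.c above repeats the same pattern: when a device has a PM domain attached, its callbacks win over the type, class and bus operations. A condensed illustration of that selection order for the runtime-idle case (a simplified sketch, not the literal kernel code):

#include <linux/device.h>
#include <linux/pm.h>

/* Simplified: how the PM core now picks a device's runtime_idle callback. */
static int (*foo_pick_runtime_idle(struct device *dev))(struct device *)
{
	if (dev->pm_domain)
		return dev->pm_domain->ops.runtime_idle;
	if (dev->type && dev->type->pm)
		return dev->type->pm->runtime_idle;
	if (dev->class && dev->class->pm)
		return dev->class->pm->runtime_idle;
	return (dev->bus && dev->bus->pm) ? dev->bus->pm->runtime_idle : NULL;
}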