diff options
author | Mayuresh Kulkarni <mkulkarni@nvidia.com> | 2012-02-03 15:06:07 +0530 |
---|---|---|
committer | Simone Willett <swillett@nvidia.com> | 2012-02-13 09:27:15 -0800 |
commit | 410041d0247db2434a3013f16930d2fcd16256c8 (patch) | |
tree | bf70ce748d00dad01f80f199a1d82b5f31a0ba26 /drivers/video/tegra/host/nvhost_acm.c | |
parent | e8dc4bd80bf2a31b46bf2bad034dcd514c1e11f5 (diff) |
video: tegra: host: use runtime pm for clock management
- use runtime pm for clock management of host1x
and its clients thus replacing ACM
- start a delayed worker after disabling the clock
if module supports power gating
- in its timeout handler power gate the module after saving
its context for next submit
- use auto-suspend mode of runtime pm for clock management
- pm core seems to keep a ref count on runtime pm thus
we cannot use runtime pm's usage_count as an indicator
of module idle during suspend
- do not use runtime pm call-backs during system suspend.
instead manage the clocks directly for context save of
modules that support it
- enable runtime pm only during boot-up as pm core disables
it before suspending the device and enables it after resume
for bug 887332
Change-Id: I3b30643e8e75c13684cf4edaaae4429c3a18d6eb
Signed-off-by: Mayuresh Kulkarni <mkulkarni@nvidia.com>
Reviewed-on: http://git-master/r/79186
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/video/tegra/host/nvhost_acm.c')
-rw-r--r-- | drivers/video/tegra/host/nvhost_acm.c | 228 |
1 files changed, 108 insertions, 120 deletions
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c index a2386a257c8f..351b70e0b6fb 100644 --- a/drivers/video/tegra/host/nvhost_acm.c +++ b/drivers/video/tegra/host/nvhost_acm.c @@ -29,13 +29,14 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <mach/powergate.h> #include <mach/clk.h> #include <mach/hardware.h> -#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ) -#define POWERGATE_DELAY 10 -#define MAX_DEVID_LENGTH 16 +#define ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT (2 * HZ) +#define POWERGATE_DELAY 10 +#define MAX_DEVID_LENGTH 16 DEFINE_MUTEX(client_list_lock); @@ -98,139 +99,113 @@ void nvhost_module_reset(struct nvhost_device *dev) __func__, dev->name); } -static void to_state_clockgated_locked(struct nvhost_device *dev) -{ - if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) { - int i; - for (i = 0; i < dev->num_clks; i++) - clk_disable(dev->clk[i]); - if (dev->dev.parent) - nvhost_module_idle(to_nvhost_device(dev->dev.parent)); - } else if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED - && dev->can_powergate) { - do_unpowergate_locked(dev->powergate_ids[0]); - do_unpowergate_locked(dev->powergate_ids[1]); - } - dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED; -} - -static void to_state_running_locked(struct nvhost_device *dev) -{ - int prev_state = dev->powerstate; - if (dev->powerstate == NVHOST_POWER_STATE_POWERGATED) - to_state_clockgated_locked(dev); - if (dev->powerstate == NVHOST_POWER_STATE_CLOCKGATED) { - int i; - - if (dev->dev.parent) - nvhost_module_busy(to_nvhost_device(dev->dev.parent)); - - for (i = 0; i < dev->num_clks; i++) { - int err = clk_enable(dev->clk[i]); - BUG_ON(err); - } - - if (prev_state == NVHOST_POWER_STATE_POWERGATED - && dev->finalize_poweron) - dev->finalize_poweron(dev); - } - dev->powerstate = NVHOST_POWER_STATE_RUNNING; -} - /* This gets called from powergate_handler() and from module suspend. 
* Module suspend is done for all modules, runtime power gating only * for modules with can_powergate set. */ -static int to_state_powergated_locked(struct nvhost_device *dev) +static int to_state_powergated_locked(struct nvhost_device *dev, + bool system_suspend) { - int err = 0; + int err = 0, i = 0; + + if (dev->prepare_poweroff && dev->powered) { + struct nvhost_device *device; + struct device *parent = dev->dev.parent; + if (parent) + device = to_nvhost_device(parent); + + if (system_suspend) { + /* enable parent clock + * host1x does not have parent */ + if (parent) { + for (i = 0; i < device->num_clks; i++) + clk_enable(device->clk[i]); + } + + /* enable module clock */ + for (i = 0; i < dev->num_clks; i++) + clk_enable(dev->clk[i]); + } else + pm_runtime_get_sync(&dev->dev); - if (dev->prepare_poweroff - && dev->powerstate != NVHOST_POWER_STATE_POWERGATED) { - /* Clock needs to be on in prepare_poweroff */ - to_state_running_locked(dev); err = dev->prepare_poweroff(dev); if (err) return err; - } - if (dev->powerstate == NVHOST_POWER_STATE_RUNNING) - to_state_clockgated_locked(dev); + if (system_suspend) { + /* disable module clock */ + for (i = 0; i < dev->num_clks; i++) + clk_disable(dev->clk[i]); + + /* disable parent clock + * host1x does not have parent */ + if (parent) { + for (i = 0; i < device->num_clks; i++) + clk_disable(device->clk[i]); + } + } else + pm_runtime_put_sync_suspend(&dev->dev); + } if (dev->can_powergate) { do_powergate_locked(dev->powergate_ids[0]); do_powergate_locked(dev->powergate_ids[1]); + dev->powered = false; } - dev->powerstate = NVHOST_POWER_STATE_POWERGATED; return 0; } -static void schedule_powergating_locked(struct nvhost_device *dev) -{ - if (dev->can_powergate) - schedule_delayed_work(&dev->powerstate_down, - msecs_to_jiffies(dev->powergate_delay)); -} - -static void schedule_clockgating_locked(struct nvhost_device *dev) -{ - schedule_delayed_work(&dev->powerstate_down, - msecs_to_jiffies(dev->clockgate_delay)); -} - 
void nvhost_module_busy(struct nvhost_device *dev) { if (dev->busy) dev->busy(dev); mutex_lock(&dev->lock); - cancel_delayed_work(&dev->powerstate_down); - dev->refcount++; - if (dev->refcount > 0 && !nvhost_module_powered(dev)) - to_state_running_locked(dev); - mutex_unlock(&dev->lock); -} - -static void powerstate_down_handler(struct work_struct *work) -{ - struct nvhost_device *dev; - - dev = container_of(to_delayed_work(work), - struct nvhost_device, - powerstate_down); - - mutex_lock(&dev->lock); - if (dev->refcount == 0) { - switch (dev->powerstate) { - case NVHOST_POWER_STATE_RUNNING: - to_state_clockgated_locked(dev); - schedule_powergating_locked(dev); - break; - case NVHOST_POWER_STATE_CLOCKGATED: - if (to_state_powergated_locked(dev)) - schedule_powergating_locked(dev); - break; - default: - break; + if (dev->can_powergate) { + /* cancel power-gate handler */ + cancel_delayed_work_sync(&dev->powerstate_down); + + /* unpowergate the module if it was power gated */ + if (!dev->powered) { + do_unpowergate_locked(dev->powergate_ids[0]); + do_unpowergate_locked(dev->powergate_ids[1]); + dev->powered = true; } } + + pm_runtime_get_sync(&dev->dev); + mutex_unlock(&dev->lock); } +static bool is_module_idle(struct nvhost_device *dev, bool system_suspend) +{ + /* for system suspend, pm core holds a reference on runtime pm. + * this is for kernels >= 3.x, it is not there for kernels < 3.x. 
+ * for more details refer the LKML thread: + * https://lkml.org/lkml/2011/6/25/93 + * https://lkml.org/lkml/2011/6/25/94 + * https://lkml.org/lkml/2011/6/25/95 */ + if (system_suspend) + return atomic_read(&dev->dev.power.usage_count) == 1; + else + return atomic_read(&dev->dev.power.usage_count) == 0; +} void nvhost_module_idle_mult(struct nvhost_device *dev, int refs) { bool kick = false; + int i; mutex_lock(&dev->lock); - dev->refcount -= refs; - if (dev->refcount == 0) { - if (nvhost_module_powered(dev)) - schedule_clockgating_locked(dev); + for (i = 0; i < refs; i++) + pm_runtime_put_sync(&dev->dev); + + if (is_module_idle(dev, false)) kick = true; - } + mutex_unlock(&dev->lock); if (kick) { @@ -241,6 +216,19 @@ void nvhost_module_idle_mult(struct nvhost_device *dev, int refs) } } +static void powerstate_down_handler(struct work_struct *work) +{ + struct nvhost_device *dev; + + dev = container_of(to_delayed_work(work), struct nvhost_device, + powerstate_down); + + mutex_lock(&dev->lock); + if (dev->can_powergate) + to_state_powergated_locked(dev, false); + mutex_unlock(&dev->lock); +} + int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate, int index) { @@ -255,7 +243,6 @@ int nvhost_module_get_rate(struct nvhost_device *dev, unsigned long *rate, *rate = clk_get_rate(c); nvhost_module_idle(dev); return 0; - } static int nvhost_module_update_rate(struct nvhost_device *dev, int index) @@ -293,7 +280,6 @@ int nvhost_module_set_rate(struct nvhost_device *dev, void *priv, ret = nvhost_module_update_rate(dev, index); mutex_unlock(&client_list_lock); return ret; - } int nvhost_module_add_client(struct nvhost_device *dev, void *priv) @@ -366,29 +352,23 @@ int nvhost_module_init(struct nvhost_device *dev) mutex_init(&dev->lock); init_waitqueue_head(&dev->idle_wq); - INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler); /* power gate units that we can power gate */ if (dev->can_powergate) { do_powergate_locked(dev->powergate_ids[0]); 
do_powergate_locked(dev->powergate_ids[1]); - dev->powerstate = NVHOST_POWER_STATE_POWERGATED; + INIT_DELAYED_WORK(&dev->powerstate_down, powerstate_down_handler); + dev->powered = false; } else { do_unpowergate_locked(dev->powergate_ids[0]); do_unpowergate_locked(dev->powergate_ids[1]); - dev->powerstate = NVHOST_POWER_STATE_CLOCKGATED; + dev->powered = true; } - return 0; -} + /* enable runtime pm */ + nvhost_module_resume(dev); -static int is_module_idle(struct nvhost_device *dev) -{ - int count; - mutex_lock(&dev->lock); - count = dev->refcount; - mutex_unlock(&dev->lock); - return (count == 0); + return 0; } static void debug_not_idle(struct nvhost_master *host) @@ -401,8 +381,8 @@ static void debug_not_idle(struct nvhost_master *host) mutex_lock(&dev->lock); if (dev->name) dev_warn(&host->pdev->dev, - "tegra_grhost: %s: refcnt %d\n", - dev->name, dev->refcount); + "tegra_grhost: %s: refcnt %d\n", dev->name, + atomic_read(&dev->dev.power.usage_count)); mutex_unlock(&dev->lock); } @@ -424,10 +404,10 @@ int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend) int ret; struct nvhost_master *host = nvhost_get_host(dev); - if (system_suspend && !is_module_idle(dev)) + if (system_suspend && !is_module_idle(dev, system_suspend)) debug_not_idle(host); - ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev), + ret = wait_event_timeout(dev->idle_wq, is_module_idle(dev, system_suspend), ACM_SUSPEND_WAIT_FOR_IDLE_TIMEOUT); if (ret == 0) { dev_info(&dev->dev, "%s prevented suspend\n", @@ -436,16 +416,20 @@ int nvhost_module_suspend(struct nvhost_device *dev, bool system_suspend) } if (system_suspend) - dev_dbg(&dev->dev, "tegra_grhost: entered idle\n"); + dev_info(&dev->dev, "tegra_grhost: entered idle\n"); mutex_lock(&dev->lock); - cancel_delayed_work(&dev->powerstate_down); - to_state_powergated_locked(dev); + if (dev->can_powergate) + cancel_delayed_work_sync(&dev->powerstate_down); + to_state_powergated_locked(dev, system_suspend); 
mutex_unlock(&dev->lock); if (dev->suspend) dev->suspend(dev); + if (system_suspend) + pm_runtime_set_suspended(&dev->dev); + return 0; } @@ -459,6 +443,10 @@ void nvhost_module_deinit(struct nvhost_device *dev) nvhost_module_suspend(dev, false); for (i = 0; i < dev->num_clks; i++) clk_put(dev->clk[i]); - dev->powerstate = NVHOST_POWER_STATE_DEINIT; } +void nvhost_module_resume(struct nvhost_device *dev) +{ + pm_runtime_set_autosuspend_delay(&dev->dev, dev->clockgate_delay); + pm_runtime_use_autosuspend(&dev->dev); +} |