From 63fffb8678658281037f6aea0597a3e944887af9 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 9 May 2005 14:10:26 -0700 Subject: ARM: Make low-level printk work Makes low-level printk work. Signed-off-by: Tony Lindgren --- kernel/printk.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 28a40d8171b8..3e565d1cd9da 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -53,6 +53,10 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) +#ifdef CONFIG_DEBUG_LL +extern void printascii(char *); +#endif + /* printk's without a loglevel use this.. */ #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL @@ -876,6 +880,10 @@ asmlinkage int vprintk(const char *fmt, va_list args) printed_len += vscnprintf(printk_buf + printed_len, sizeof(printk_buf) - printed_len, fmt, args); +#ifdef CONFIG_DEBUG_LL + printascii(printk_buf); +#endif + p = printk_buf; /* Read log level and handle special printk prefix */ -- cgit v1.2.3 From a841be8af78880e00c8ae57f8440b67a1d128365 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 17 Feb 2009 14:51:02 -0800 Subject: mm: Add min_free_order_shift tunable. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit By default the kernel tries to keep half as much memory free at each order as it does for one order below. This can be too agressive when running without swap. 
Change-Id: I5efc1a0b50f41ff3ac71e92d2efd175dedd54ead Signed-off-by: Arve Hjønnevåg --- kernel/sysctl.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 11d65b531e50..fd15163f360a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -96,6 +96,7 @@ extern char core_pattern[]; extern unsigned int core_pipe_limit; extern int pid_max; extern int min_free_kbytes; +extern int min_free_order_shift; extern int pid_max_min, pid_max_max; extern int sysctl_drop_caches; extern int percpu_pagelist_fraction; @@ -1188,6 +1189,13 @@ static struct ctl_table vm_table[] = { .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, + { + .procname = "min_free_order_shift", + .data = &min_free_order_shift, + .maxlen = sizeof(min_free_order_shift), + .mode = 0644, + .proc_handler = &proc_dointvec + }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, -- cgit v1.2.3 From e10525beeb7f0f0baf70efd36063de728b35e603 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 8 May 2007 15:39:13 +0700 Subject: Add build option to to set the default panic timeout. --- kernel/panic.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index d7bb6974efb5..cdb86b128328 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -33,7 +33,10 @@ static int pause_on_oops; static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -int panic_timeout; +#ifndef CONFIG_PANIC_TIMEOUT +#define CONFIG_PANIC_TIMEOUT 0 +#endif +int panic_timeout = CONFIG_PANIC_TIMEOUT; EXPORT_SYMBOL_GPL(panic_timeout); ATOMIC_NOTIFIER_HEAD(panic_notifier_list); -- cgit v1.2.3 From 6bd233df04316d8d3ef8445a4c45f8b8afda42d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Wed, 10 Dec 2008 20:06:28 -0800 Subject: sched: Enable might_sleep before initializing drivers. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows detection of init bugs in built-in drivers. Signed-off-by: Arve Hjønnevåg --- kernel/sched.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index b50b0f0c9aa9..d0f600c94843 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8202,12 +8202,23 @@ static inline int preempt_count_equals(int preempt_offset) return (nested == preempt_offset); } +static int __might_sleep_init_called; +int __init __might_sleep_init(void) +{ + __might_sleep_init_called = 1; + return 0; +} +early_initcall(__might_sleep_init); + void __might_sleep(const char *file, int line, int preempt_offset) { static unsigned long prev_jiffy; /* ratelimiting */ if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) || - system_state != SYSTEM_RUNNING || oops_in_progress) + oops_in_progress) + return; + if (system_state != SYSTEM_RUNNING && + (!__might_sleep_init_called || system_state != SYSTEM_BOOTING)) return; if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) return; -- cgit v1.2.3 From b680227f20c5cbb3ee962c14cdeb7599c90aa861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 9 Sep 2008 22:14:34 -0700 Subject: PM: Implement wakelock api. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PM: wakelock: Replace expire work with a timer The expire work function did not work in the normal case. 
Signed-off-by: Arve Hjønnevåg --- kernel/power/Kconfig | 19 ++ kernel/power/Makefile | 1 + kernel/power/power.h | 7 + kernel/power/wakelock.c | 602 ++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 629 insertions(+) create mode 100644 kernel/power/wakelock.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 3744c594b19b..836b0bb196e1 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -18,6 +18,25 @@ config SUSPEND_FREEZER Turning OFF this setting is NOT recommended! If in doubt, say Y. +config HAS_WAKELOCK + bool + +config WAKELOCK + bool "Wake lock" + depends on PM && RTC_CLASS + default n + select HAS_WAKELOCK + ---help--- + Enable wakelocks. When user space request a sleep state the + sleep request will be delayed until no wake locks are held. + +config WAKELOCK_STAT + bool "Wake lock stats" + depends on WAKELOCK + default y + ---help--- + Report wake lock stats in /proc/wakelocks + config HIBERNATE_CALLBACKS bool diff --git a/kernel/power/Makefile b/kernel/power/Makefile index c5ebc6a90643..e58fcac346fe 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -8,5 +8,6 @@ obj-$(CONFIG_SUSPEND) += suspend.o obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o +obj-$(CONFIG_WAKELOCK) += wakelock.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/power.h b/kernel/power/power.h index 9a00a0a26280..a48dd0277ed2 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -245,3 +245,10 @@ static inline void suspend_thaw_processes(void) { } #endif + +#ifdef CONFIG_WAKELOCK +/* kernel/power/wakelock.c */ +extern struct workqueue_struct *suspend_work_queue; +extern struct wake_lock main_wake_lock; +extern suspend_state_t requested_suspend_state; +#endif diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c new file mode 100644 index 000000000000..b24eb7594ef2 --- /dev/null +++ 
b/kernel/power/wakelock.c @@ -0,0 +1,602 @@ +/* kernel/power/wakelock.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include /* sys_sync */ +#include +#ifdef CONFIG_WAKELOCK_STAT +#include +#endif +#include "power.h" + +enum { + DEBUG_EXIT_SUSPEND = 1U << 0, + DEBUG_WAKEUP = 1U << 1, + DEBUG_SUSPEND = 1U << 2, + DEBUG_EXPIRE = 1U << 3, + DEBUG_WAKE_LOCK = 1U << 4, +}; +static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +#define WAKE_LOCK_TYPE_MASK (0x0f) +#define WAKE_LOCK_INITIALIZED (1U << 8) +#define WAKE_LOCK_ACTIVE (1U << 9) +#define WAKE_LOCK_AUTO_EXPIRE (1U << 10) +#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) + +static DEFINE_SPINLOCK(list_lock); +static LIST_HEAD(inactive_locks); +static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; +static int current_event_num; +struct workqueue_struct *suspend_work_queue; +struct wake_lock main_wake_lock; +suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; +static struct wake_lock unknown_wakeup; + +#ifdef CONFIG_WAKELOCK_STAT +static struct wake_lock deleted_wake_locks; +static ktime_t last_sleep_time_update; +static int wait_for_wakeup; + +int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) +{ + struct timespec ts; + struct timespec kt; + struct timespec tomono; + struct timespec delta; + unsigned long seq; + long timeout; + + if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) + return 0; + do 
{ + seq = read_seqbegin(&xtime_lock); + timeout = lock->expires - jiffies; + if (timeout > 0) + return 0; + kt = current_kernel_time(); + tomono = wall_to_monotonic; + } while (read_seqretry(&xtime_lock, seq)); + jiffies_to_timespec(-timeout, &delta); + set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, + kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); + *expire_time = timespec_to_ktime(ts); + return 1; +} + + +static int print_lock_stat(char *buf, struct wake_lock *lock) +{ + int lock_count = lock->stat.count; + int expire_count = lock->stat.expire_count; + ktime_t active_time = ktime_set(0, 0); + ktime_t total_time = lock->stat.total_time; + ktime_t max_time = lock->stat.max_time; + ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; + if (lock->flags & WAKE_LOCK_ACTIVE) { + ktime_t now, add_time; + int expired = get_expired_time(lock, &now); + if (!expired) + now = ktime_get(); + add_time = ktime_sub(now, lock->stat.last_time); + lock_count++; + if (!expired) + active_time = add_time; + else + expire_count++; + total_time = ktime_add(total_time, add_time); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) + prevent_suspend_time = ktime_add(prevent_suspend_time, + ktime_sub(now, last_sleep_time_update)); + if (add_time.tv64 > max_time.tv64) + max_time = add_time; + } + + return sprintf(buf, "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t" + "%lld\n", lock->name, lock_count, expire_count, + lock->stat.wakeup_count, ktime_to_ns(active_time), + ktime_to_ns(total_time), + ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), + ktime_to_ns(lock->stat.last_time)); +} + + +static int wakelocks_read_proc(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + unsigned long irqflags; + struct wake_lock *lock; + int len = 0; + char *p = page; + int type; + + spin_lock_irqsave(&list_lock, irqflags); + + p += sprintf(p, "name\tcount\texpire_count\twake_count\tactive_since" + 
"\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); + list_for_each_entry(lock, &inactive_locks, link) { + p += print_lock_stat(p, lock); + } + for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { + list_for_each_entry(lock, &active_wake_locks[type], link) + p += print_lock_stat(p, lock); + } + spin_unlock_irqrestore(&list_lock, irqflags); + + *start = page + off; + + len = p - page; + if (len > off) + len -= off; + else + len = 0; + + return len < count ? len : count; +} + +static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) +{ + ktime_t duration; + ktime_t now; + if (!(lock->flags & WAKE_LOCK_ACTIVE)) + return; + if (get_expired_time(lock, &now)) + expired = 1; + else + now = ktime_get(); + lock->stat.count++; + if (expired) + lock->stat.expire_count++; + duration = ktime_sub(now, lock->stat.last_time); + lock->stat.total_time = ktime_add(lock->stat.total_time, duration); + if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time)) + lock->stat.max_time = duration; + lock->stat.last_time = ktime_get(); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { + duration = ktime_sub(now, last_sleep_time_update); + lock->stat.prevent_suspend_time = ktime_add( + lock->stat.prevent_suspend_time, duration); + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; + } +} + +static void update_sleep_wait_stats_locked(int done) +{ + struct wake_lock *lock; + ktime_t now, etime, elapsed, add; + int expired; + + now = ktime_get(); + elapsed = ktime_sub(now, last_sleep_time_update); + list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) { + expired = get_expired_time(lock, &etime); + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { + if (expired) + add = ktime_sub(etime, last_sleep_time_update); + else + add = elapsed; + lock->stat.prevent_suspend_time = ktime_add( + lock->stat.prevent_suspend_time, add); + } + if (done || expired) + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; + else + lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND; + } + 
last_sleep_time_update = now; +} +#endif + + +static void expire_wake_lock(struct wake_lock *lock) +{ +#ifdef CONFIG_WAKELOCK_STAT + wake_unlock_stat_locked(lock, 1); +#endif + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); + list_del(&lock->link); + list_add(&lock->link, &inactive_locks); + if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE)) + pr_info("expired wake lock %s\n", lock->name); +} + +static void print_active_locks(int type) +{ + unsigned long irqflags; + struct wake_lock *lock; + + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + spin_lock_irqsave(&list_lock, irqflags); + list_for_each_entry(lock, &active_wake_locks[type], link) { + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { + long timeout = lock->expires - jiffies; + if (timeout <= 0) + pr_info("wake lock %s, expired\n", lock->name); + else + pr_info("active wake lock %s, time left %ld\n", + lock->name, timeout); + } else + pr_info("active wake lock %s\n", lock->name); + } + spin_unlock_irqrestore(&list_lock, irqflags); +} + +static long has_wake_lock_locked(int type) +{ + struct wake_lock *lock, *n; + long max_timeout = 0; + + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) { + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { + long timeout = lock->expires - jiffies; + if (timeout <= 0) + expire_wake_lock(lock); + else if (timeout > max_timeout) + max_timeout = timeout; + } else + return -1; + } + return max_timeout; +} + +long has_wake_lock(int type) +{ + long ret; + unsigned long irqflags; + spin_lock_irqsave(&list_lock, irqflags); + ret = has_wake_lock_locked(type); + spin_unlock_irqrestore(&list_lock, irqflags); + return ret; +} + +static void suspend(struct work_struct *work) +{ + int ret; + int entry_event_num; + + if (has_wake_lock(WAKE_LOCK_SUSPEND)) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("suspend: abort suspend\n"); + return; + } + + entry_event_num = current_event_num; + sys_sync(); + if (debug_mask & DEBUG_SUSPEND) + 
pr_info("suspend: enter suspend\n"); + ret = pm_suspend(requested_suspend_state); + if (debug_mask & DEBUG_EXIT_SUSPEND) { + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info("suspend: exit suspend, ret = %d " + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + } + if (current_event_num == entry_event_num) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("suspend: pm_suspend returned with no event\n"); + wake_lock_timeout(&unknown_wakeup, HZ / 2); + } +} +static DECLARE_WORK(suspend_work, suspend); + +static void expire_wake_locks(unsigned long data) +{ + long has_lock; + unsigned long irqflags; + if (debug_mask & DEBUG_EXPIRE) + pr_info("expire_wake_locks: start\n"); + if (debug_mask & DEBUG_SUSPEND) + print_active_locks(WAKE_LOCK_SUSPEND); + spin_lock_irqsave(&list_lock, irqflags); + has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND); + if (debug_mask & DEBUG_EXPIRE) + pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock); + if (has_lock == 0) + queue_work(suspend_work_queue, &suspend_work); + spin_unlock_irqrestore(&list_lock, irqflags); +} +static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0); + +static int power_suspend_late(struct device *dev) +{ + int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? 
-EAGAIN : 0; +#ifdef CONFIG_WAKELOCK_STAT + wait_for_wakeup = 1; +#endif + if (debug_mask & DEBUG_SUSPEND) + pr_info("power_suspend_late return %d\n", ret); + return ret; +} + +static struct dev_pm_ops power_driver_pm_ops = { + .suspend_noirq = power_suspend_late, +}; + +static struct platform_driver power_driver = { + .driver.name = "power", + .driver.pm = &power_driver_pm_ops, +}; +static struct platform_device power_device = { + .name = "power", +}; + +void wake_lock_init(struct wake_lock *lock, int type, const char *name) +{ + unsigned long irqflags = 0; + + if (name) + lock->name = name; + BUG_ON(!lock->name); + + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock_init name=%s\n", lock->name); +#ifdef CONFIG_WAKELOCK_STAT + lock->stat.count = 0; + lock->stat.expire_count = 0; + lock->stat.wakeup_count = 0; + lock->stat.total_time = ktime_set(0, 0); + lock->stat.prevent_suspend_time = ktime_set(0, 0); + lock->stat.max_time = ktime_set(0, 0); + lock->stat.last_time = ktime_set(0, 0); +#endif + lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED; + + INIT_LIST_HEAD(&lock->link); + spin_lock_irqsave(&list_lock, irqflags); + list_add(&lock->link, &inactive_locks); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_lock_init); + +void wake_lock_destroy(struct wake_lock *lock) +{ + unsigned long irqflags; + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock_destroy name=%s\n", lock->name); + spin_lock_irqsave(&list_lock, irqflags); + lock->flags &= ~WAKE_LOCK_INITIALIZED; +#ifdef CONFIG_WAKELOCK_STAT + if (lock->stat.count) { + deleted_wake_locks.stat.count += lock->stat.count; + deleted_wake_locks.stat.expire_count += lock->stat.expire_count; + deleted_wake_locks.stat.total_time = + ktime_add(deleted_wake_locks.stat.total_time, + lock->stat.total_time); + deleted_wake_locks.stat.prevent_suspend_time = + ktime_add(deleted_wake_locks.stat.prevent_suspend_time, + lock->stat.prevent_suspend_time); + 
deleted_wake_locks.stat.max_time = + ktime_add(deleted_wake_locks.stat.max_time, + lock->stat.max_time); + } +#endif + list_del(&lock->link); + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_lock_destroy); + +static void wake_lock_internal( + struct wake_lock *lock, long timeout, int has_timeout) +{ + int type; + unsigned long irqflags; + long expire_in; + + spin_lock_irqsave(&list_lock, irqflags); + type = lock->flags & WAKE_LOCK_TYPE_MASK; + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); + BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED)); +#ifdef CONFIG_WAKELOCK_STAT + if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) { + if (debug_mask & DEBUG_WAKEUP) + pr_info("wakeup wake lock: %s\n", lock->name); + wait_for_wakeup = 0; + lock->stat.wakeup_count++; + } + if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) && + (long)(lock->expires - jiffies) <= 0) { + wake_unlock_stat_locked(lock, 0); + lock->stat.last_time = ktime_get(); + } +#endif + if (!(lock->flags & WAKE_LOCK_ACTIVE)) { + lock->flags |= WAKE_LOCK_ACTIVE; +#ifdef CONFIG_WAKELOCK_STAT + lock->stat.last_time = ktime_get(); +#endif + } + list_del(&lock->link); + if (has_timeout) { + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n", + lock->name, type, timeout / HZ, + (timeout % HZ) * MSEC_PER_SEC / HZ); + lock->expires = jiffies + timeout; + lock->flags |= WAKE_LOCK_AUTO_EXPIRE; + list_add_tail(&lock->link, &active_wake_locks[type]); + } else { + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_lock: %s, type %d\n", lock->name, type); + lock->expires = LONG_MAX; + lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE; + list_add(&lock->link, &active_wake_locks[type]); + } + if (type == WAKE_LOCK_SUSPEND) { + current_event_num++; +#ifdef CONFIG_WAKELOCK_STAT + if (lock == &main_wake_lock) + update_sleep_wait_stats_locked(1); + else if (!wake_lock_active(&main_wake_lock)) + update_sleep_wait_stats_locked(0); +#endif + if (has_timeout) + expire_in = has_wake_lock_locked(type); + else 
+ expire_in = -1; + if (expire_in > 0) { + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_lock: %s, start expire timer, " + "%ld\n", lock->name, expire_in); + mod_timer(&expire_timer, jiffies + expire_in); + } else { + if (del_timer(&expire_timer)) + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_lock: %s, stop expire timer\n", + lock->name); + if (expire_in == 0) + queue_work(suspend_work_queue, &suspend_work); + } + } + spin_unlock_irqrestore(&list_lock, irqflags); +} + +void wake_lock(struct wake_lock *lock) +{ + wake_lock_internal(lock, 0, 0); +} +EXPORT_SYMBOL(wake_lock); + +void wake_lock_timeout(struct wake_lock *lock, long timeout) +{ + wake_lock_internal(lock, timeout, 1); +} +EXPORT_SYMBOL(wake_lock_timeout); + +void wake_unlock(struct wake_lock *lock) +{ + int type; + unsigned long irqflags; + spin_lock_irqsave(&list_lock, irqflags); + type = lock->flags & WAKE_LOCK_TYPE_MASK; +#ifdef CONFIG_WAKELOCK_STAT + wake_unlock_stat_locked(lock, 0); +#endif + if (debug_mask & DEBUG_WAKE_LOCK) + pr_info("wake_unlock: %s\n", lock->name); + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); + list_del(&lock->link); + list_add(&lock->link, &inactive_locks); + if (type == WAKE_LOCK_SUSPEND) { + long has_lock = has_wake_lock_locked(type); + if (has_lock > 0) { + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_unlock: %s, start expire timer, " + "%ld\n", lock->name, has_lock); + mod_timer(&expire_timer, jiffies + has_lock); + } else { + if (del_timer(&expire_timer)) + if (debug_mask & DEBUG_EXPIRE) + pr_info("wake_unlock: %s, stop expire " + "timer\n", lock->name); + if (has_lock == 0) + queue_work(suspend_work_queue, &suspend_work); + } + if (lock == &main_wake_lock) { + if (debug_mask & DEBUG_SUSPEND) + print_active_locks(WAKE_LOCK_SUSPEND); +#ifdef CONFIG_WAKELOCK_STAT + update_sleep_wait_stats_locked(0); +#endif + } + } + spin_unlock_irqrestore(&list_lock, irqflags); +} +EXPORT_SYMBOL(wake_unlock); + +int wake_lock_active(struct wake_lock *lock) +{ + return 
!!(lock->flags & WAKE_LOCK_ACTIVE); +} +EXPORT_SYMBOL(wake_lock_active); + +static int __init wakelocks_init(void) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++) + INIT_LIST_HEAD(&active_wake_locks[i]); + +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND, + "deleted_wake_locks"); +#endif + wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); + wake_lock(&main_wake_lock); + wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); + + ret = platform_device_register(&power_device); + if (ret) { + pr_err("wakelocks_init: platform_device_register failed\n"); + goto err_platform_device_register; + } + ret = platform_driver_register(&power_driver); + if (ret) { + pr_err("wakelocks_init: platform_driver_register failed\n"); + goto err_platform_driver_register; + } + + suspend_work_queue = create_singlethread_workqueue("suspend"); + if (suspend_work_queue == NULL) { + ret = -ENOMEM; + goto err_suspend_work_queue; + } + +#ifdef CONFIG_WAKELOCK_STAT + create_proc_read_entry("wakelocks", S_IRUGO, NULL, + wakelocks_read_proc, NULL); +#endif + + return 0; + +err_suspend_work_queue: + platform_driver_unregister(&power_driver); +err_platform_driver_register: + platform_device_unregister(&power_device); +err_platform_device_register: + wake_lock_destroy(&unknown_wakeup); + wake_lock_destroy(&main_wake_lock); +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_destroy(&deleted_wake_locks); +#endif + return ret; +} + +static void __exit wakelocks_exit(void) +{ +#ifdef CONFIG_WAKELOCK_STAT + remove_proc_entry("wakelocks", NULL); +#endif + destroy_workqueue(suspend_work_queue); + platform_driver_unregister(&power_driver); + platform_device_unregister(&power_device); + wake_lock_destroy(&unknown_wakeup); + wake_lock_destroy(&main_wake_lock); +#ifdef CONFIG_WAKELOCK_STAT + wake_lock_destroy(&deleted_wake_locks); +#endif +} + +core_initcall(wakelocks_init); +module_exit(wakelocks_exit); -- cgit v1.2.3 From 
0c87f5efe31f9cfe0493f65914dee989577e9177 Mon Sep 17 00:00:00 2001 From: Mike Chan Date: Tue, 25 Aug 2009 18:10:32 -0700 Subject: power: Prevent spinlock recursion when wake_unlock() is called Signed-off-by: Mike Chan --- kernel/power/wakelock.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index b24eb7594ef2..62714d75c3c9 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -216,13 +216,13 @@ static void expire_wake_lock(struct wake_lock *lock) pr_info("expired wake lock %s\n", lock->name); } +/* Caller must acquire the list_lock spinlock */ static void print_active_locks(int type) { unsigned long irqflags; struct wake_lock *lock; BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); - spin_lock_irqsave(&list_lock, irqflags); list_for_each_entry(lock, &active_wake_locks[type], link) { if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { long timeout = lock->expires - jiffies; @@ -234,7 +234,6 @@ static void print_active_locks(int type) } else pr_info("active wake lock %s\n", lock->name); } - spin_unlock_irqrestore(&list_lock, irqflags); } static long has_wake_lock_locked(int type) @@ -306,9 +305,9 @@ static void expire_wake_locks(unsigned long data) unsigned long irqflags; if (debug_mask & DEBUG_EXPIRE) pr_info("expire_wake_locks: start\n"); + spin_lock_irqsave(&list_lock, irqflags); if (debug_mask & DEBUG_SUSPEND) print_active_locks(WAKE_LOCK_SUSPEND); - spin_lock_irqsave(&list_lock, irqflags); has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND); if (debug_mask & DEBUG_EXPIRE) pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock); -- cgit v1.2.3 From 7dc8525106e469b36126f54bf0b50bf190abc9a7 Mon Sep 17 00:00:00 2001 From: Erik Gilling Date: Tue, 25 Aug 2009 20:09:12 -0700 Subject: power: wakelocks: fix buffer overflow in print_wake_locks Change-Id: Ic944e3b3d3bc53eddc6fd0963565fd072cac373c Signed-off-by: Erik Gilling --- kernel/power/wakelock.c | 44 
+++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 62714d75c3c9..263de24f8019 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -40,6 +40,8 @@ module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define WAKE_LOCK_AUTO_EXPIRE (1U << 10) #define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) +#define TOO_MAY_LOCKS_WARNING "\n\ntoo many wakelocks!!!\n" + static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(inactive_locks); static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; @@ -81,13 +83,15 @@ int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) } -static int print_lock_stat(char *buf, struct wake_lock *lock) +static int print_lock_stat(char *buf, int len, struct wake_lock *lock) { int lock_count = lock->stat.count; int expire_count = lock->stat.expire_count; ktime_t active_time = ktime_set(0, 0); ktime_t total_time = lock->stat.total_time; ktime_t max_time = lock->stat.max_time; + int n; + ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; if (lock->flags & WAKE_LOCK_ACTIVE) { ktime_t now, add_time; @@ -108,12 +112,15 @@ static int print_lock_stat(char *buf, struct wake_lock *lock) max_time = add_time; } - return sprintf(buf, "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t" - "%lld\n", lock->name, lock_count, expire_count, - lock->stat.wakeup_count, ktime_to_ns(active_time), - ktime_to_ns(total_time), - ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), - ktime_to_ns(lock->stat.last_time)); + n = snprintf(buf, len, + "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n", + lock->name, lock_count, expire_count, + lock->stat.wakeup_count, ktime_to_ns(active_time), + ktime_to_ns(total_time), + ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), + ktime_to_ns(lock->stat.last_time)); + + return n > len ? 
len : n; } @@ -123,31 +130,30 @@ static int wakelocks_read_proc(char *page, char **start, off_t off, unsigned long irqflags; struct wake_lock *lock; int len = 0; - char *p = page; int type; spin_lock_irqsave(&list_lock, irqflags); - p += sprintf(p, "name\tcount\texpire_count\twake_count\tactive_since" - "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); + len += snprintf(page + len, count - len, + "name\tcount\texpire_count\twake_count\tactive_since" + "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); list_for_each_entry(lock, &inactive_locks, link) { - p += print_lock_stat(p, lock); + len += print_lock_stat(page + len, count - len, lock); } for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { list_for_each_entry(lock, &active_wake_locks[type], link) - p += print_lock_stat(p, lock); + len += print_lock_stat(page + len, count - len, lock); } spin_unlock_irqrestore(&list_lock, irqflags); - *start = page + off; + if (len == count) + memcpy(page + len - strlen(TOO_MAY_LOCKS_WARNING), + TOO_MAY_LOCKS_WARNING, + strlen(TOO_MAY_LOCKS_WARNING)); - len = p - page; - if (len > off) - len -= off; - else - len = 0; + *eof = 1; - return len < count ? len : count; + return len; } static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) -- cgit v1.2.3 From d55eb4bdb051cb3a48122a92183927d3f8a0f8d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Wed, 2 Dec 2009 18:22:00 -0800 Subject: PM: wakelocks: Use seq_file for /proc/wakelocks so we can get more than 3K of stats. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: I42ed8bea639684f7a8a95b2057516764075c6b01 Signed-off-by: Arve Hjønnevåg --- kernel/power/wakelock.c | 51 ++++++++++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 28 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 263de24f8019..4b0e26065be0 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -40,8 +40,6 @@ module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define WAKE_LOCK_AUTO_EXPIRE (1U << 10) #define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) -#define TOO_MAY_LOCKS_WARNING "\n\ntoo many wakelocks!!!\n" - static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(inactive_locks); static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; @@ -83,14 +81,13 @@ int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) } -static int print_lock_stat(char *buf, int len, struct wake_lock *lock) +static int print_lock_stat(struct seq_file *m, struct wake_lock *lock) { int lock_count = lock->stat.count; int expire_count = lock->stat.expire_count; ktime_t active_time = ktime_set(0, 0); ktime_t total_time = lock->stat.total_time; ktime_t max_time = lock->stat.max_time; - int n; ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; if (lock->flags & WAKE_LOCK_ACTIVE) { @@ -112,48 +109,34 @@ static int print_lock_stat(char *buf, int len, struct wake_lock *lock) max_time = add_time; } - n = snprintf(buf, len, + return seq_printf(m, "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t%lld\n", lock->name, lock_count, expire_count, lock->stat.wakeup_count, ktime_to_ns(active_time), ktime_to_ns(total_time), ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), ktime_to_ns(lock->stat.last_time)); - - return n > len ? 
len : n; } - -static int wakelocks_read_proc(char *page, char **start, off_t off, - int count, int *eof, void *data) +static int wakelock_stats_show(struct seq_file *m, void *unused) { unsigned long irqflags; struct wake_lock *lock; - int len = 0; + int ret; int type; spin_lock_irqsave(&list_lock, irqflags); - len += snprintf(page + len, count - len, - "name\tcount\texpire_count\twake_count\tactive_since" + ret = seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since" "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); - list_for_each_entry(lock, &inactive_locks, link) { - len += print_lock_stat(page + len, count - len, lock); - } + list_for_each_entry(lock, &inactive_locks, link) + ret = print_lock_stat(m, lock); for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { list_for_each_entry(lock, &active_wake_locks[type], link) - len += print_lock_stat(page + len, count - len, lock); + ret = print_lock_stat(m, lock); } spin_unlock_irqrestore(&list_lock, irqflags); - - if (len == count) - memcpy(page + len - strlen(TOO_MAY_LOCKS_WARNING), - TOO_MAY_LOCKS_WARNING, - strlen(TOO_MAY_LOCKS_WARNING)); - - *eof = 1; - - return len; + return 0; } static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) @@ -535,6 +518,19 @@ int wake_lock_active(struct wake_lock *lock) } EXPORT_SYMBOL(wake_lock_active); +static int wakelock_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, wakelock_stats_show, NULL); +} + +static const struct file_operations wakelock_stats_fops = { + .owner = THIS_MODULE, + .open = wakelock_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static int __init wakelocks_init(void) { int ret; @@ -569,8 +565,7 @@ static int __init wakelocks_init(void) } #ifdef CONFIG_WAKELOCK_STAT - create_proc_read_entry("wakelocks", S_IRUGO, NULL, - wakelocks_read_proc, NULL); + proc_create("wakelocks", S_IRUGO, NULL, &wakelock_stats_fops); #endif return 0; -- cgit v1.2.3 From 
10369c2de4b5f7d7e2e94290c230a5acc77fa2fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 7 Oct 2008 20:48:01 -0700 Subject: PM: Implement early suspend api --- kernel/power/Kconfig | 12 +++ kernel/power/Makefile | 1 + kernel/power/earlysuspend.c | 178 ++++++++++++++++++++++++++++++++++++++++++++ kernel/power/power.h | 6 ++ 4 files changed, 197 insertions(+) create mode 100644 kernel/power/earlysuspend.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 836b0bb196e1..06b41d8ea86d 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -21,6 +21,9 @@ config SUSPEND_FREEZER config HAS_WAKELOCK bool +config HAS_EARLYSUSPEND + bool + config WAKELOCK bool "Wake lock" depends on PM && RTC_CLASS @@ -37,6 +40,15 @@ config WAKELOCK_STAT ---help--- Report wake lock stats in /proc/wakelocks +config EARLYSUSPEND + bool "Early suspend" + depends on WAKELOCK + default y + select HAS_EARLYSUSPEND + ---help--- + Call early suspend handlers when the user requested sleep state + changes. + config HIBERNATE_CALLBACKS bool diff --git a/kernel/power/Makefile b/kernel/power/Makefile index e58fcac346fe..2d2b1bed36af 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -9,5 +9,6 @@ obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o obj-$(CONFIG_WAKELOCK) += wakelock.o +obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c new file mode 100644 index 000000000000..84bed51dcdce --- /dev/null +++ b/kernel/power/earlysuspend.c @@ -0,0 +1,178 @@ +/* kernel/power/earlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include /* sys_sync */ +#include +#include + +#include "power.h" + +enum { + DEBUG_USER_STATE = 1U << 0, + DEBUG_SUSPEND = 1U << 2, +}; +static int debug_mask = DEBUG_USER_STATE; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static DEFINE_MUTEX(early_suspend_lock); +static LIST_HEAD(early_suspend_handlers); +static void early_suspend(struct work_struct *work); +static void late_resume(struct work_struct *work); +static DECLARE_WORK(early_suspend_work, early_suspend); +static DECLARE_WORK(late_resume_work, late_resume); +static DEFINE_SPINLOCK(state_lock); +enum { + SUSPEND_REQUESTED = 0x1, + SUSPENDED = 0x2, + SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED, +}; +static int state; + +void register_early_suspend(struct early_suspend *handler) +{ + struct list_head *pos; + + mutex_lock(&early_suspend_lock); + list_for_each(pos, &early_suspend_handlers) { + struct early_suspend *e; + e = list_entry(pos, struct early_suspend, link); + if (e->level > handler->level) + break; + } + list_add_tail(&handler->link, pos); + if ((state & SUSPENDED) && handler->suspend) + handler->suspend(handler); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(register_early_suspend); + +void unregister_early_suspend(struct early_suspend *handler) +{ + mutex_lock(&early_suspend_lock); + list_del(&handler->link); + mutex_unlock(&early_suspend_lock); +} +EXPORT_SYMBOL(unregister_early_suspend); + +static void early_suspend(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPEND_REQUESTED) + 
state |= SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: abort, state %d\n", state); + mutex_unlock(&early_suspend_lock); + goto abort; + } + + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: call handlers\n"); + list_for_each_entry(pos, &early_suspend_handlers, link) { + if (pos->suspend != NULL) + pos->suspend(pos); + } + mutex_unlock(&early_suspend_lock); + + if (debug_mask & DEBUG_SUSPEND) + pr_info("early_suspend: sync\n"); + + sys_sync(); +abort: + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPEND_REQUESTED_AND_SUSPENDED) + wake_unlock(&main_wake_lock); + spin_unlock_irqrestore(&state_lock, irqflags); +} + +static void late_resume(struct work_struct *work) +{ + struct early_suspend *pos; + unsigned long irqflags; + int abort = 0; + + mutex_lock(&early_suspend_lock); + spin_lock_irqsave(&state_lock, irqflags); + if (state == SUSPENDED) + state &= ~SUSPENDED; + else + abort = 1; + spin_unlock_irqrestore(&state_lock, irqflags); + + if (abort) { + if (debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: abort, state %d\n", state); + goto abort; + } + if (debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: call handlers\n"); + list_for_each_entry_reverse(pos, &early_suspend_handlers, link) + if (pos->resume != NULL) + pos->resume(pos); + if (debug_mask & DEBUG_SUSPEND) + pr_info("late_resume: done\n"); +abort: + mutex_unlock(&early_suspend_lock); +} + +void request_suspend_state(suspend_state_t new_state) +{ + unsigned long irqflags; + int old_sleep; + + spin_lock_irqsave(&state_lock, irqflags); + old_sleep = state & SUSPEND_REQUESTED; + if (debug_mask & DEBUG_USER_STATE) { + struct timespec ts; + struct rtc_time tm; + getnstimeofday(&ts); + rtc_time_to_tm(ts.tv_sec, &tm); + pr_info("request_suspend_state: %s (%d->%d) at %lld " + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", + new_state != PM_SUSPEND_ON ? 
"sleep" : "wakeup", + requested_suspend_state, new_state, + ktime_to_ns(ktime_get()), + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + } + if (!old_sleep && new_state != PM_SUSPEND_ON) { + state |= SUSPEND_REQUESTED; + queue_work(suspend_work_queue, &early_suspend_work); + } else if (old_sleep && new_state == PM_SUSPEND_ON) { + state &= ~SUSPEND_REQUESTED; + wake_lock(&main_wake_lock); + queue_work(suspend_work_queue, &late_resume_work); + } + requested_suspend_state = new_state; + spin_unlock_irqrestore(&state_lock, irqflags); +} + +suspend_state_t get_suspend_state(void) +{ + return requested_suspend_state; +} diff --git a/kernel/power/power.h b/kernel/power/power.h index a48dd0277ed2..491c2db0ef8b 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -252,3 +252,9 @@ extern struct workqueue_struct *suspend_work_queue; extern struct wake_lock main_wake_lock; extern suspend_state_t requested_suspend_state; #endif + +#ifdef CONFIG_EARLYSUSPEND +/* kernel/power/earlysuspend.c */ +void request_suspend_state(suspend_state_t state); +suspend_state_t get_suspend_state(void); +#endif -- cgit v1.2.3 From b766d576834f8f61dbc47e0e8e5cddf035a4bd69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Thu, 9 Oct 2008 19:17:11 -0700 Subject: PM: Enable early suspend through /sys/power/state If EARLYSUSPEND is enabled then writes to /sys/power/state no longer blocks, and the kernel will try to enter the requested state every time no wakelocks are held. Write "on" to resume normal operation. 
--- kernel/power/main.c | 11 +++++++++++ kernel/power/suspend.c | 3 +++ 2 files changed, 14 insertions(+) (limited to 'kernel') diff --git a/kernel/power/main.c b/kernel/power/main.c index 6c601f871964..8021065555f4 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -171,7 +171,11 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t n) { #ifdef CONFIG_SUSPEND +#ifdef CONFIG_EARLYSUSPEND + suspend_state_t state = PM_SUSPEND_ON; +#else suspend_state_t state = PM_SUSPEND_STANDBY; +#endif const char * const *s; #endif char *p; @@ -193,7 +197,14 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr, break; } if (state < PM_SUSPEND_MAX && *s) +#ifdef CONFIG_EARLYSUSPEND + if (state == PM_SUSPEND_ON || valid_state(state)) { + error = 0; + request_suspend_state(state); + } +#else error = enter_state(state); +#endif #endif Exit: diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index d3caa7634987..a6f6e3114a24 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -28,6 +28,9 @@ #include "power.h" const char *const pm_states[PM_SUSPEND_MAX] = { +#ifdef CONFIG_EARLYSUSPEND + [PM_SUSPEND_ON] = "on", +#endif [PM_SUSPEND_STANDBY] = "standby", [PM_SUSPEND_MEM] = "mem", }; -- cgit v1.2.3 From 3bd252058e9743d2426a3ffec1b75722721ef0a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Thu, 9 Oct 2008 21:01:46 -0700 Subject: PM: Add user-space wake lock api. This adds /sys/power/wake_lock and /sys/power/wake_unlock. Writing a string to wake_lock creates a wake lock the first time it sees a string and locks it. Optionally, the string can be followed by a timeout. To unlock the wake lock, write the same string to wake_unlock. 
Change-Id: I66c6e3fe6487d17f9c2fafde1174042e57d15cd7 --- kernel/power/Kconfig | 10 ++ kernel/power/Makefile | 1 + kernel/power/main.c | 9 ++ kernel/power/power.h | 11 +++ kernel/power/userwakelock.c | 219 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 250 insertions(+) create mode 100644 kernel/power/userwakelock.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 06b41d8ea86d..a135db5a1c19 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -40,6 +40,16 @@ config WAKELOCK_STAT ---help--- Report wake lock stats in /proc/wakelocks +config USER_WAKELOCK + bool "Userspace wake locks" + depends on WAKELOCK + default y + ---help--- + User-space wake lock api. Write "lockname" or "lockname timeout" + to /sys/power/wake_lock lock and if needed create a wake lock. + Write "lockname" to /sys/power/wake_unlock to unlock a user wake + lock. + config EARLYSUSPEND bool "Early suspend" depends on WAKELOCK diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 2d2b1bed36af..44a3db6f5ee6 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ block_io.o obj-$(CONFIG_WAKELOCK) += wakelock.o +obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/main.c b/kernel/power/main.c index 8021065555f4..3304594553ce 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -309,6 +309,11 @@ power_attr(pm_trace_dev_match); #endif /* CONFIG_PM_TRACE */ +#ifdef CONFIG_USER_WAKELOCK +power_attr(wake_lock); +power_attr(wake_unlock); +#endif + static struct attribute * g[] = { &state_attr.attr, #ifdef CONFIG_PM_TRACE @@ -321,6 +326,10 @@ static struct attribute * g[] = { #ifdef CONFIG_PM_DEBUG &pm_test_attr.attr, #endif +#ifdef CONFIG_USER_WAKELOCK + &wake_lock_attr.attr, + 
&wake_unlock_attr.attr, +#endif #endif NULL, }; diff --git a/kernel/power/power.h b/kernel/power/power.h index 491c2db0ef8b..b6b9006480ff 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -253,6 +253,17 @@ extern struct wake_lock main_wake_lock; extern suspend_state_t requested_suspend_state; #endif +#ifdef CONFIG_USER_WAKELOCK +ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); +ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n); +ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf); +ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n); +#endif + #ifdef CONFIG_EARLYSUSPEND /* kernel/power/earlysuspend.c */ void request_suspend_state(suspend_state_t state); diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c new file mode 100644 index 000000000000..a28a8db41468 --- /dev/null +++ b/kernel/power/userwakelock.c @@ -0,0 +1,219 @@ +/* kernel/power/userwakelock.c + * + * Copyright (C) 2005-2008 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include + +#include "power.h" + +enum { + DEBUG_FAILURE = BIT(0), + DEBUG_ERROR = BIT(1), + DEBUG_NEW = BIT(2), + DEBUG_ACCESS = BIT(3), + DEBUG_LOOKUP = BIT(4), +}; +static int debug_mask = DEBUG_FAILURE; +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); + +static DEFINE_MUTEX(tree_lock); + +struct user_wake_lock { + struct rb_node node; + struct wake_lock wake_lock; + char name[0]; +}; +struct rb_root user_wake_locks; + +static struct user_wake_lock *lookup_wake_lock_name( + const char *buf, int allocate, long *timeoutptr) +{ + struct rb_node **p = &user_wake_locks.rb_node; + struct rb_node *parent = NULL; + struct user_wake_lock *l; + int diff; + u64 timeout; + int name_len; + const char *arg; + + /* Find length of lock name and start of optional timeout string */ + arg = buf; + while (*arg && !isspace(*arg)) + arg++; + name_len = arg - buf; + if (!name_len) + goto bad_arg; + while (isspace(*arg)) + arg++; + + /* Process timeout string */ + if (timeoutptr && *arg) { + timeout = simple_strtoull(arg, (char **)&arg, 0); + while (isspace(*arg)) + arg++; + if (*arg) + goto bad_arg; + /* convert timeout from nanoseconds to jiffies > 0 */ + timeout += (NSEC_PER_SEC / HZ) - 1; + do_div(timeout, (NSEC_PER_SEC / HZ)); + if (timeout <= 0) + timeout = 1; + *timeoutptr = timeout; + } else if (*arg) + goto bad_arg; + else if (timeoutptr) + *timeoutptr = 0; + + /* Lookup wake lock in rbtree */ + while (*p) { + parent = *p; + l = rb_entry(parent, struct user_wake_lock, node); + diff = strncmp(buf, l->name, name_len); + if (!diff && l->name[name_len]) + diff = -1; + if (debug_mask & DEBUG_ERROR) + pr_info("lookup_wake_lock_name: compare %.*s %s %d\n", + name_len, buf, l->name, diff); + + if (diff < 0) + p = &(*p)->rb_left; + else if (diff > 0) + p = &(*p)->rb_right; + else + return l; + } + + /* Allocate and add new wakelock to rbtree */ + if (!allocate) { + if (debug_mask & DEBUG_ERROR) + 
pr_info("lookup_wake_lock_name: %.*s not found\n", + name_len, buf); + return ERR_PTR(-EINVAL); + } + l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL); + if (l == NULL) { + if (debug_mask & DEBUG_FAILURE) + pr_err("lookup_wake_lock_name: failed to allocate " + "memory for %.*s\n", name_len, buf); + return ERR_PTR(-ENOMEM); + } + memcpy(l->name, buf, name_len); + if (debug_mask & DEBUG_NEW) + pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name); + wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name); + rb_link_node(&l->node, parent, p); + rb_insert_color(&l->node, &user_wake_locks); + return l; + +bad_arg: + if (debug_mask & DEBUG_ERROR) + pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n", + name_len, buf, arg); + return ERR_PTR(-EINVAL); +} + +ssize_t wake_lock_show( + struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + char *end = buf + PAGE_SIZE; + struct rb_node *n; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { + l = rb_entry(n, struct user_wake_lock, node); + if (wake_lock_active(&l->wake_lock)) + s += scnprintf(s, end - s, "%s ", l->name); + } + s += scnprintf(s, end - s, "\n"); + + mutex_unlock(&tree_lock); + return (s - buf); +} + +ssize_t wake_lock_store( + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + long timeout; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + l = lookup_wake_lock_name(buf, 1, &timeout); + if (IS_ERR(l)) { + n = PTR_ERR(l); + goto bad_name; + } + + if (debug_mask & DEBUG_ACCESS) + pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout); + + if (timeout) + wake_lock_timeout(&l->wake_lock, timeout); + else + wake_lock(&l->wake_lock); +bad_name: + mutex_unlock(&tree_lock); + return n; +} + + +ssize_t wake_unlock_show( + struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + char *end = buf + PAGE_SIZE; + struct 
rb_node *n; + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { + l = rb_entry(n, struct user_wake_lock, node); + if (!wake_lock_active(&l->wake_lock)) + s += scnprintf(s, end - s, "%s ", l->name); + } + s += scnprintf(s, end - s, "\n"); + + mutex_unlock(&tree_lock); + return (s - buf); +} + +ssize_t wake_unlock_store( + struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t n) +{ + struct user_wake_lock *l; + + mutex_lock(&tree_lock); + l = lookup_wake_lock_name(buf, 0, NULL); + if (IS_ERR(l)) { + n = PTR_ERR(l); + goto not_found; + } + + if (debug_mask & DEBUG_ACCESS) + pr_info("wake_unlock_store: %s\n", l->name); + + wake_unlock(&l->wake_lock); +not_found: + mutex_unlock(&tree_lock); + return n; +} + -- cgit v1.2.3 From c5dc99a52287d2754f9995a307418193b30ed7b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 14 Oct 2008 16:02:39 -0700 Subject: PM: wakelock: Abort task freezing if a wake lock is held. Avoids a problem where the device sometimes hangs for 20 seconds before the screen is turned on. --- kernel/power/process.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'kernel') diff --git a/kernel/power/process.c b/kernel/power/process.c index 0cf3a27a6c9d..be2caaae2944 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -16,6 +16,7 @@ #include #include #include +#include /* * Timeout for stopping processes @@ -82,6 +83,10 @@ static int try_to_freeze_tasks(bool sig_only) todo += wq_busy; } + if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) { + wakeup = 1; + break; + } if (!todo || time_after(jiffies, end_time)) break; @@ -108,13 +113,18 @@ static int try_to_freeze_tasks(bool sig_only) * and caller must call thaw_processes() if something fails), * but it cleans up leftover PF_FREEZE requests. 
*/ - printk("\n"); - printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds " - "(%d tasks refusing to freeze, wq_busy=%d):\n", - wakeup ? "aborted" : "failed", - elapsed_csecs / 100, elapsed_csecs % 100, - todo - wq_busy, wq_busy); - + if(wakeup) { + printk("\n"); + printk(KERN_ERR "Freezing of %s aborted\n", + sig_only ? "user space " : "tasks "); + } + else { + printk("\n"); + printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " + "(%d tasks refusing to freeze, wq_busy=%d):\n", + elapsed_csecs / 100, elapsed_csecs % 100, + todo - wq_busy, wq_busy); + } thaw_workqueues(); read_lock(&tasklist_lock); -- cgit v1.2.3 From 1624b44c228fd35c354c3f5086f0be8aa8f2ad74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Tue, 1 Dec 2009 20:54:37 -0800 Subject: PM: wakelock: Don't dump unfrozen task list when aborting try_to_freeze_tasks after less than one second MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: Ib2976e5b97a5ee4ec9abd4d4443584d9257d0941 Signed-off-by: Arve Hjønnevåg --- kernel/power/process.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/process.c b/kernel/power/process.c index be2caaae2944..31338cdeafc4 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -130,7 +130,8 @@ static int try_to_freeze_tasks(bool sig_only) read_lock(&tasklist_lock); do_each_thread(g, p) { task_lock(p); - if (!wakeup && freezing(p) && !freezer_should_skip(p)) + if (freezing(p) && !freezer_should_skip(p) && + elapsed_csecs > 100) sched_show_task(p); cancel_freezing(p); task_unlock(p); -- cgit v1.2.3 From f170b03d86863da07960f53a4076a64380b1dc74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Wed, 15 Oct 2008 17:52:20 -0700 Subject: PM: earlysuspend: Add console switch when user requested sleep state changes. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Arve Hjønnevåg --- kernel/power/Kconfig | 17 +++++++++ kernel/power/Makefile | 1 + kernel/power/consoleearlysuspend.c | 78 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 kernel/power/consoleearlysuspend.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index a135db5a1c19..eec7034a359a 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -59,6 +59,23 @@ config EARLYSUSPEND Call early suspend handlers when the user requested sleep state changes. +choice + prompt "User-space screen access" + default CONSOLE_EARLYSUSPEND + depends on HAS_EARLYSUSPEND + + config NO_USER_SPACE_SCREEN_ACCESS_CONTROL + bool "None" + + config CONSOLE_EARLYSUSPEND + bool "Console switch on early-suspend" + depends on HAS_EARLYSUSPEND && VT + ---help--- + Register early suspend handler to perform a console switch to + when user-space should stop drawing to the screen and a switch + back when it should resume. +endchoice + config HIBERNATE_CALLBACKS bool diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 44a3db6f5ee6..0d1c5e4fda19 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -11,5 +11,6 @@ obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ obj-$(CONFIG_WAKELOCK) += wakelock.o obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o +obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c new file mode 100644 index 000000000000..a8befb419158 --- /dev/null +++ b/kernel/power/consoleearlysuspend.c @@ -0,0 +1,78 @@ +/* kernel/power/consoleearlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) + +static int orig_fgconsole; +static void console_early_suspend(struct early_suspend *h) +{ + acquire_console_sem(); + orig_fgconsole = fg_console; + if (vc_allocate(EARLY_SUSPEND_CONSOLE)) + goto err; + if (set_console(EARLY_SUSPEND_CONSOLE)) + goto err; + release_console_sem(); + + if (vt_waitactive(EARLY_SUSPEND_CONSOLE)) + pr_warning("console_early_suspend: Can't switch VCs.\n"); + return; +err: + pr_warning("console_early_suspend: Can't set console\n"); + release_console_sem(); +} + +static void console_late_resume(struct early_suspend *h) +{ + int ret; + acquire_console_sem(); + ret = set_console(orig_fgconsole); + release_console_sem(); + if (ret) { + pr_warning("console_late_resume: Can't set console.\n"); + return; + } + + if (vt_waitactive(orig_fgconsole)) + pr_warning("console_late_resume: Can't switch VCs.\n"); +} + +static struct early_suspend console_early_suspend_desc = { + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, + .suspend = console_early_suspend, + .resume = console_late_resume, +}; + +static int __init console_early_suspend_init(void) +{ + register_early_suspend(&console_early_suspend_desc); + return 0; +} + +static void __exit console_early_suspend_exit(void) +{ + unregister_early_suspend(&console_early_suspend_desc); +} + +module_init(console_early_suspend_init); +module_exit(console_early_suspend_exit); + -- cgit v1.2.3 From 
1c26813171af15585f50070dc158bba056d1cf2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Mon, 14 Dec 2009 22:14:52 -0800 Subject: consoleearlysuspend: Fix for 2.6.32 vt_waitactive now needs a 1 based console number Change-Id: I07ab9a3773c93d67c09d928c8d5494ce823ffa2e --- kernel/power/consoleearlysuspend.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c index a8befb419158..a3edcb267389 100644 --- a/kernel/power/consoleearlysuspend.c +++ b/kernel/power/consoleearlysuspend.c @@ -33,7 +33,7 @@ static void console_early_suspend(struct early_suspend *h) goto err; release_console_sem(); - if (vt_waitactive(EARLY_SUSPEND_CONSOLE)) + if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1)) pr_warning("console_early_suspend: Can't switch VCs.\n"); return; err: @@ -52,7 +52,7 @@ static void console_late_resume(struct early_suspend *h) return; } - if (vt_waitactive(orig_fgconsole)) + if (vt_waitactive(orig_fgconsole + 1)) pr_warning("console_late_resume: Can't switch VCs.\n"); } -- cgit v1.2.3 From 8d0aa9f0bbb71790756d60c2a4550f7428ee7869 Mon Sep 17 00:00:00 2001 From: Rebecca Schultz Date: Thu, 17 Jul 2008 18:14:55 -0700 Subject: PM: earlysuspend: Removing dependence on console. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than signaling a full update of the display from userspace via a console switch, this patch introduces 2 files int /sys/power, wait_for_fb_sleep and wait_for_fb_wake. Reading these files will block until the requested state has been entered. When a read from wait_for_fb_sleep returns userspace should stop drawing. When wait_for_fb_wake returns, it should do a full update. If either are called when the fb driver is already in the requested state, they will return immediately. 
Signed-off-by: Rebecca Schultz Signed-off-by: Arve Hjønnevåg --- kernel/power/Kconfig | 9 +++ kernel/power/Makefile | 1 + kernel/power/fbearlysuspend.c | 153 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 163 insertions(+) create mode 100644 kernel/power/fbearlysuspend.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index eec7034a359a..3a27c53a0508 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -61,6 +61,7 @@ config EARLYSUSPEND choice prompt "User-space screen access" + default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE default CONSOLE_EARLYSUSPEND depends on HAS_EARLYSUSPEND @@ -74,6 +75,14 @@ choice Register early suspend handler to perform a console switch to when user-space should stop drawing to the screen and a switch back when it should resume. + + config FB_EARLYSUSPEND + bool "Sysfs interface" + depends on HAS_EARLYSUSPEND + ---help--- + Register early suspend handler that notifies and waits for + user-space through sysfs when user-space should stop drawing + to the screen and notifies user-space when it should resume. endchoice config HIBERNATE_CALLBACKS diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 0d1c5e4fda19..493f19d2a293 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -12,5 +12,6 @@ obj-$(CONFIG_WAKELOCK) += wakelock.o obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o +obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c new file mode 100644 index 000000000000..15137650149c --- /dev/null +++ b/kernel/power/fbearlysuspend.c @@ -0,0 +1,153 @@ +/* kernel/power/fbearlysuspend.c + * + * Copyright (C) 2005-2008 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include + +#include "power.h" + +static wait_queue_head_t fb_state_wq; +static DEFINE_SPINLOCK(fb_state_lock); +static enum { + FB_STATE_STOPPED_DRAWING, + FB_STATE_REQUEST_STOP_DRAWING, + FB_STATE_DRAWING_OK, +} fb_state; + +/* tell userspace to stop drawing, wait for it to stop */ +static void stop_drawing_early_suspend(struct early_suspend *h) +{ + int ret; + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + fb_state = FB_STATE_REQUEST_STOP_DRAWING; + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + + wake_up_all(&fb_state_wq); + ret = wait_event_timeout(fb_state_wq, + fb_state == FB_STATE_STOPPED_DRAWING, + HZ); + if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING)) + pr_warning("stop_drawing_early_suspend: timeout waiting for " + "userspace to stop drawing\n"); +} + +/* tell userspace to start drawing */ +static void start_drawing_late_resume(struct early_suspend *h) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + fb_state = FB_STATE_DRAWING_OK; + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + wake_up(&fb_state_wq); +} + +static struct early_suspend stop_drawing_early_suspend_desc = { + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, + .suspend = stop_drawing_early_suspend, + .resume = start_drawing_late_resume, +}; + +static ssize_t wait_for_fb_sleep_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int ret; + + ret = wait_event_interruptible(fb_state_wq, + fb_state 
!= FB_STATE_DRAWING_OK); + if (ret && fb_state == FB_STATE_DRAWING_OK) + return ret; + else + s += sprintf(buf, "sleeping"); + return s - buf; +} + +static ssize_t wait_for_fb_wake_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + char *s = buf; + int ret; + unsigned long irq_flags; + + spin_lock_irqsave(&fb_state_lock, irq_flags); + if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) { + fb_state = FB_STATE_STOPPED_DRAWING; + wake_up(&fb_state_wq); + } + spin_unlock_irqrestore(&fb_state_lock, irq_flags); + + ret = wait_event_interruptible(fb_state_wq, + fb_state == FB_STATE_DRAWING_OK); + if (ret && fb_state != FB_STATE_DRAWING_OK) + return ret; + else + s += sprintf(buf, "awake"); + + return s - buf; +} + +#define power_ro_attr(_name) \ +static struct kobj_attribute _name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0444, \ + }, \ + .show = _name##_show, \ + .store = NULL, \ +} + +power_ro_attr(wait_for_fb_sleep); +power_ro_attr(wait_for_fb_wake); + +static struct attribute *g[] = { + &wait_for_fb_sleep_attr.attr, + &wait_for_fb_wake_attr.attr, + NULL, +}; + +static struct attribute_group attr_group = { + .attrs = g, +}; + +static int __init android_power_init(void) +{ + int ret; + + init_waitqueue_head(&fb_state_wq); + fb_state = FB_STATE_DRAWING_OK; + + ret = sysfs_create_group(power_kobj, &attr_group); + if (ret) { + pr_err("android_power_init: sysfs_create_group failed\n"); + return ret; + } + + register_early_suspend(&stop_drawing_early_suspend_desc); + return 0; +} + +static void __exit android_power_exit(void) +{ + unregister_early_suspend(&stop_drawing_early_suspend_desc); + sysfs_remove_group(power_kobj, &attr_group); +} + +module_init(android_power_init); +module_exit(android_power_exit); + -- cgit v1.2.3 From e84e563ae2ed297d5d35d8da3b7302384ff6c7da Mon Sep 17 00:00:00 2001 From: San Mehat Date: Thu, 21 May 2009 14:10:06 -0700 Subject: cgroup: Add generic cgroup subsystem permission checks. 
Rather than using explicit euid == 0 checks when trying to move tasks into a cgroup via CFS, move permission checks into each specific cgroup subsystem. If a subsystem does not specify a 'can_attach' handler, then we fall back to doing our checks the old way. This way non-root processes can add arbitrary processes to a cgroup if all the registered subsystems on that cgroup agree. Also change explicit euid == 0 check to CAP_SYS_ADMIN Signed-off-by: San Mehat --- kernel/cgroup.c | 10 ++++++++++ kernel/cgroup_freezer.c | 8 ++++++++ kernel/cpuset.c | 7 +++++++ kernel/sched.c | 9 +++++++++ 4 files changed, 34 insertions(+) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1d2b6ceea95d..e06035aa3038 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -60,6 +60,7 @@ #include #include #include /* used in cgroup_attach_proc */ +#include #include @@ -1842,6 +1843,15 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) failed_ss = ss; goto out; } + } else if (!capable(CAP_SYS_ADMIN)) { + const struct cred *cred = current_cred(), *tcred; + + /* No can_attach() - check perms generically */ + tcred = __task_cred(tsk); + if (cred->euid != tcred->uid && + cred->euid != tcred->suid) { + return -EACCES; + } } if (ss->can_attach_task) { retval = ss->can_attach_task(cgrp, tsk); diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index e691818d7e45..6ebda1df9b70 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -164,6 +164,14 @@ static int freezer_can_attach(struct cgroup_subsys *ss, { struct freezer *freezer; + if ((current != task) && (!capable(CAP_SYS_ADMIN))) { + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(task); + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + /* * Anything frozen can't move or be moved to/from. 
*/ diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 10131fdaff70..3b2305163da4 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1373,6 +1373,13 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, { struct cpuset *cs = cgroup_cs(cont); + if ((current != task) && (!capable(CAP_SYS_ADMIN))) { + const struct cred *cred = current_cred(), *tcred; + + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) return -ENOSPC; diff --git a/kernel/sched.c b/kernel/sched.c index d0f600c94843..279d2552dae7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8967,6 +8967,15 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) static int cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { + if ((current != tsk) && (!capable(CAP_SYS_NICE))) { + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(tsk); + + if (cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EPERM; + } + #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) return -EINVAL; -- cgit v1.2.3 From c69e7f2ea7d1d77dc5b49c07b9a6fdabffa26d06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Thu, 15 Jan 2009 19:07:27 -0800 Subject: Revert "printk: remove unused code from kernel/printk.c" This reverts commit acff181d3574244e651913df77332e897b88bff4. --- kernel/printk.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 3e565d1cd9da..4f3a6481e4d5 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -294,6 +294,45 @@ static inline void boot_delay_msec(void) } #endif +/* + * Return the number of unread characters in the log buffer. + */ +static int log_buf_get_len(void) +{ + return logged_chars; +} + +/* + * Copy a range of characters from the log buffer. 
+ */ +int log_buf_copy(char *dest, int idx, int len) +{ + int ret, max; + bool took_lock = false; + + if (!oops_in_progress) { + spin_lock_irq(&logbuf_lock); + took_lock = true; + } + + max = log_buf_get_len(); + if (idx < 0 || idx >= max) { + ret = -1; + } else { + if (len > max) + len = max; + ret = len; + idx += (log_end - max); + while (len-- > 0) + dest[len] = LOG_BUF(idx + len); + } + + if (took_lock) + spin_unlock_irq(&logbuf_lock); + + return ret; +} + #ifdef CONFIG_SECURITY_DMESG_RESTRICT int dmesg_restrict = 1; #else -- cgit v1.2.3 From d006c0fd9ae4cdc8aaf9e2802aefa78672e381b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= Date: Mon, 14 Apr 2008 21:35:25 -0700 Subject: printk: Fix log_buf_copy termination. If idx was non-zero and the log had wrapped, len did not get truncated to stop at the last byte written to the log. --- kernel/printk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 4f3a6481e4d5..2a8a61c666ba 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -319,8 +319,8 @@ int log_buf_copy(char *dest, int idx, int len) if (idx < 0 || idx >= max) { ret = -1; } else { - if (len > max) - len = max; + if (len > max - idx) + len = max - idx; ret = len; idx += (log_end - max); while (len-- > 0) -- cgit v1.2.3 From d46f9b31be7f79fc2b457396a2ad5579e2326b81 Mon Sep 17 00:00:00 2001 From: San Mehat Date: Thu, 6 Aug 2009 10:25:21 -0700 Subject: kernel: printk: Add non exported function for clearing the log ring buffer Signed-off-by: San Mehat --- kernel/printk.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index 2a8a61c666ba..beff95d3bcb8 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -302,6 +302,14 @@ static int log_buf_get_len(void) return logged_chars; } +/* + * Clears the ring-buffer + */ +void log_buf_clear(void) +{ + logged_chars = 0; +} + /* * Copy a range of characters 
from the log buffer. */ -- cgit v1.2.3 From 3840df53cda29bd78dc10fdfe371579c3455893b Mon Sep 17 00:00:00 2001 From: Mike Chan Date: Tue, 16 Feb 2010 14:18:55 -0800 Subject: power: wakelock: Print active wakelocks when has_wake_lock() is called When DEBUG_SUSPEND is enabled print active wakelocks when we check if there are any active wakelocks. In print_active_locks(), print expired wakelocks if DEBUG_EXPIRE is enabled Change-Id: Ib1cb795555e71ff23143a2bac7c8a58cbce16547 Signed-off-by: Mike Chan --- kernel/power/wakelock.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 4b0e26065be0..ca48bb8d316b 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -208,20 +208,23 @@ static void expire_wake_lock(struct wake_lock *lock) /* Caller must acquire the list_lock spinlock */ static void print_active_locks(int type) { - unsigned long irqflags; struct wake_lock *lock; + bool print_expired = true; BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); list_for_each_entry(lock, &active_wake_locks[type], link) { if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { long timeout = lock->expires - jiffies; - if (timeout <= 0) - pr_info("wake lock %s, expired\n", lock->name); - else + if (timeout > 0) pr_info("active wake lock %s, time left %ld\n", lock->name, timeout); - } else + else if (print_expired) + pr_info("wake lock %s, expired\n", lock->name); + } else { pr_info("active wake lock %s\n", lock->name); + if (!debug_mask & DEBUG_EXPIRE) + print_expired = false; + } } } @@ -250,6 +253,8 @@ long has_wake_lock(int type) unsigned long irqflags; spin_lock_irqsave(&list_lock, irqflags); ret = has_wake_lock_locked(type); + if (ret && (debug_mask & DEBUG_SUSPEND) && type == WAKE_LOCK_SUSPEND) + print_active_locks(type); spin_unlock_irqrestore(&list_lock, irqflags); return ret; } -- cgit v1.2.3 From 4c97b1b8baa48de5675affccd1150f3b00310d18 Mon Sep 17 00:00:00 2001 From: San Mehat 
Date: Thu, 6 May 2010 15:37:55 -0700 Subject: sched: Add a generic notifier when a task struct is about to be freed This patch adds a notifier which can be used by subsystems that may be interested in when a task has completely died and is about to have its last resource freed. The Android lowmemory killer uses this to determine when a task it has killed has finally given up its goods. Signed-off-by: San Mehat --- kernel/fork.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'kernel') diff --git a/kernel/fork.c b/kernel/fork.c index 8e6b6f4fb272..f65fa0627c04 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -153,6 +153,9 @@ struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; +/* Notifier list called when a task struct is freed */ +static ATOMIC_NOTIFIER_HEAD(task_free_notifier); + static void account_kernel_stack(struct thread_info *ti, int account) { struct zone *zone = page_zone(virt_to_page(ti)); @@ -184,6 +187,18 @@ static inline void put_signal_struct(struct signal_struct *sig) free_signal_struct(sig); } +int task_free_register(struct notifier_block *n) +{ + return atomic_notifier_chain_register(&task_free_notifier, n); +} +EXPORT_SYMBOL(task_free_register); + +int task_free_unregister(struct notifier_block *n) +{ + return atomic_notifier_chain_unregister(&task_free_notifier, n); +} +EXPORT_SYMBOL(task_free_unregister); + void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); @@ -194,6 +209,7 @@ void __put_task_struct(struct task_struct *tsk) delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); + atomic_notifier_call_chain(&task_free_notifier, 0, tsk); if (!profile_handoff_task(tsk)) free_task(tsk); } -- cgit v1.2.3 From 9075fa48d0b4453038b53a12d2a474312bf68742 Mon Sep 17 00:00:00 2001 From: Mike Chan Date: Mon, 10 May 2010 17:54:48 -0700 Subject: scheduler: cpuacct: Enable platform hooks to track cpuusage for CPU frequencies Introduce new
platform callback hooks for cpuacct for tracking CPU frequencies Not all platforms / architectures have a set CPU_FREQ_TABLE defined for CPU transition speeds. In order to track time spent in at various CPU frequencies, we enable platform callbacks from cpuacct for this accounting. Architectures that support overclock boosting, or don't have pre-defined frequency tables can implement their own bucketing system that makes sense given their cpufreq scaling abilities. New file: cpuacct.cpufreq reports the CPU time (in nanoseconds) spent at each CPU frequency. Change-Id: I10a80b3162e6fff3a8a2f74dd6bb37e88b12ba96 Signed-off-by: Mike Chan --- kernel/sched.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index 279d2552dae7..a01185969457 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #include @@ -9104,8 +9105,30 @@ struct cpuacct { u64 __percpu *cpuusage; struct percpu_counter cpustat[CPUACCT_STAT_NSTATS]; struct cpuacct *parent; + struct cpuacct_charge_calls *cpufreq_fn; + void *cpuacct_data; }; +static struct cpuacct *cpuacct_root; + +/* Default calls for cpufreq accounting */ +static struct cpuacct_charge_calls *cpuacct_cpufreq; +int cpuacct_register_cpufreq(struct cpuacct_charge_calls *fn) +{ + cpuacct_cpufreq = fn; + + /* + * Root node is created before platform can register callbacks, + * initalize here. 
+ */ + if (cpuacct_root && fn) { + cpuacct_root->cpufreq_fn = fn; + if (fn->init) + fn->init(&cpuacct_root->cpuacct_data); + } + return 0; +} + struct cgroup_subsys cpuacct_subsys; /* return cpu accounting group corresponding to this container */ @@ -9140,8 +9163,16 @@ static struct cgroup_subsys_state *cpuacct_create( if (percpu_counter_init(&ca->cpustat[i], 0)) goto out_free_counters; + ca->cpufreq_fn = cpuacct_cpufreq; + + /* If available, have platform code initalize cpu frequency table */ + if (ca->cpufreq_fn && ca->cpufreq_fn->init) + ca->cpufreq_fn->init(&ca->cpuacct_data); + if (cgrp->parent) ca->parent = cgroup_ca(cgrp->parent); + else + cpuacct_root = ca; return &ca->css; @@ -9269,6 +9300,16 @@ static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft, return 0; } +static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct cpuacct *ca = cgroup_ca(cgrp); + if (ca->cpufreq_fn && ca->cpufreq_fn->show) + ca->cpufreq_fn->show(ca->cpuacct_data, cb); + + return 0; +} + static struct cftype files[] = { { .name = "usage", @@ -9283,6 +9324,10 @@ static struct cftype files[] = { .name = "stat", .read_map = cpuacct_stats_show, }, + { + .name = "cpufreq", + .read_map = cpuacct_cpufreq_show, + }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) @@ -9312,6 +9357,10 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) for (; ca; ca = ca->parent) { u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); *cpuusage += cputime; + + /* Call back into platform code to account for CPU speeds */ + if (ca->cpufreq_fn && ca->cpufreq_fn->charge) + ca->cpufreq_fn->charge(ca->cpuacct_data, cputime, cpu); } rcu_read_unlock(); -- cgit v1.2.3 From 6c0d4dd0527f87230469a088b3b51823a96de7ce Mon Sep 17 00:00:00 2001 From: Mike Chan Date: Wed, 12 May 2010 15:52:14 -0700 Subject: scheduler: cpuacct: Enable platform callbacks for cpuacct power tracking Platform must register cpu power function 
that return power in milliWatt seconds. Change-Id: I1caa0335e316c352eee3b1ddf326fcd4942bcbe8 Signed-off-by: Mike Chan --- kernel/sched.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'kernel') diff --git a/kernel/sched.c b/kernel/sched.c index a01185969457..27ee31e5673e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -9304,12 +9304,28 @@ static int cpuacct_cpufreq_show(struct cgroup *cgrp, struct cftype *cft, struct cgroup_map_cb *cb) { struct cpuacct *ca = cgroup_ca(cgrp); - if (ca->cpufreq_fn && ca->cpufreq_fn->show) - ca->cpufreq_fn->show(ca->cpuacct_data, cb); + if (ca->cpufreq_fn && ca->cpufreq_fn->cpufreq_show) + ca->cpufreq_fn->cpufreq_show(ca->cpuacct_data, cb); return 0; } +/* return total cpu power usage (milliWatt second) of a group */ +static u64 cpuacct_powerusage_read(struct cgroup *cgrp, struct cftype *cft) +{ + int i; + struct cpuacct *ca = cgroup_ca(cgrp); + u64 totalpower = 0; + + if (ca->cpufreq_fn && ca->cpufreq_fn->power_usage) + for_each_present_cpu(i) { + totalpower += ca->cpufreq_fn->power_usage( + ca->cpuacct_data); + } + + return totalpower; +} + static struct cftype files[] = { { .name = "usage", @@ -9328,6 +9344,10 @@ static struct cftype files[] = { .name = "cpufreq", .read_map = cpuacct_cpufreq_show, }, + { + .name = "power", + .read_u64 = cpuacct_powerusage_read + }, }; static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) -- cgit v1.2.3 From b19e9d33c93d717e558367162712a4a2edd49753 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Sat, 21 Aug 2010 17:27:02 -0700 Subject: wakelock: Fix operator precedence bug Change-Id: I21366ace371d1b8f4684ddbe4ea8d555a926ac21 Signed-off-by: Colin Cross --- kernel/power/wakelock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index ca48bb8d316b..d3d7fd682c00 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -222,7 +222,7 
@@ static void print_active_locks(int type) pr_info("wake lock %s, expired\n", lock->name); } else { pr_info("active wake lock %s\n", lock->name); - if (!debug_mask & DEBUG_EXPIRE) + if (!(debug_mask & DEBUG_EXPIRE)) print_expired = false; } } -- cgit v1.2.3 From 4821b01af559f225600d4c3b303d89f9ca4f765b Mon Sep 17 00:00:00 2001 From: Erik Gilling Date: Mon, 30 Aug 2010 18:22:20 -0700 Subject: power: wakelock: call __get_wall_to_monotonic() instead of using wall_to_monotonic Change-Id: I9e9c3b923bf9a22ffd48f80a72050289496e57d8 --- kernel/power/wakelock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index d3d7fd682c00..ee9781c5adb2 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -71,7 +71,7 @@ int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) if (timeout > 0) return 0; kt = current_kernel_time(); - tomono = wall_to_monotonic; + tomono = __get_wall_to_monotonic(); } while (read_seqretry(&xtime_lock, seq)); jiffies_to_timespec(-timeout, &delta); set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, -- cgit v1.2.3 From a300f2997a5bbba4dbab8dd1c689d5ef7521ad9a Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 23 Nov 2010 21:37:03 -0800 Subject: cgroup: Set CGRP_RELEASABLE when adding to a cgroup Changes the meaning of CGRP_RELEASABLE to be set on any cgroup that has ever had a task or cgroup in it, or had css_get called on it. The bit is set in cgroup_attach_task, cgroup_create, and __css_get. It is not necessary to set the bit in cgroup_fork, as the task is either in the root cgroup, in which can never be released, or the task it was forked from already set the bit in croup_attach_task. 
Signed-off-by: Colin Cross --- kernel/cgroup.c | 53 +++++++++++++++++++++++------------------------------ 1 file changed, 23 insertions(+), 30 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e06035aa3038..9e853b6852f0 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -336,7 +336,15 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) * compiled into their kernel but not actually in use */ static int use_task_css_set_links __read_mostly; -static void __put_css_set(struct css_set *cg, int taskexit) +/* + * refcounted get/put for css_set objects + */ +static inline void get_css_set(struct css_set *cg) +{ + atomic_inc(&cg->refcount); +} + +static void put_css_set(struct css_set *cg) { struct cg_cgroup_link *link; struct cg_cgroup_link *saved_link; @@ -362,12 +370,8 @@ static void __put_css_set(struct css_set *cg, int taskexit) struct cgroup *cgrp = link->cgrp; list_del(&link->cg_link_list); list_del(&link->cgrp_link_list); - if (atomic_dec_and_test(&cgrp->count) && - notify_on_release(cgrp)) { - if (taskexit) - set_bit(CGRP_RELEASABLE, &cgrp->flags); + if (atomic_dec_and_test(&cgrp->count)) check_for_release(cgrp); - } kfree(link); } @@ -376,24 +380,6 @@ static void __put_css_set(struct css_set *cg, int taskexit) kfree_rcu(cg, rcu_head); } -/* - * refcounted get/put for css_set objects - */ -static inline void get_css_set(struct css_set *cg) -{ - atomic_inc(&cg->refcount); -} - -static inline void put_css_set(struct css_set *cg) -{ - __put_css_set(cg, 0); -} - -static inline void put_css_set_taskexit(struct css_set *cg) -{ - __put_css_set(cg, 1); -} - /* * compare_css_sets - helper function for find_existing_css_set(). 
* @cg: candidate css_set being tested @@ -1875,6 +1861,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) ss->attach(ss, cgrp, oldcgrp, tsk); } + set_bit(CGRP_RELEASABLE, &cgrp->flags); synchronize_rcu(); /* @@ -3824,6 +3811,8 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, if (err < 0) goto err_remove; + set_bit(CGRP_RELEASABLE, &parent->flags); + /* The cgroup directory was pre-locked for us */ BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); @@ -4040,7 +4029,6 @@ again: cgroup_d_remove_dir(d); dput(d); - set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); /* @@ -4640,7 +4628,7 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) task_unlock(tsk); if (cg) - put_css_set_taskexit(cg); + put_css_set(cg); } /** @@ -4693,6 +4681,14 @@ static void check_for_release(struct cgroup *cgrp) } } +/* Caller must verify that the css is not for root cgroup */ +void __css_get(struct cgroup_subsys_state *css, int count) +{ + atomic_add(count, &css->refcnt); + set_bit(CGRP_RELEASABLE, &css->cgroup->flags); +} +EXPORT_SYMBOL_GPL(__css_get); + /* Caller must verify that the css is not for root cgroup */ void __css_put(struct cgroup_subsys_state *css, int count) { @@ -4701,10 +4697,7 @@ void __css_put(struct cgroup_subsys_state *css, int count) rcu_read_lock(); val = atomic_sub_return(count, &css->refcnt); if (val == 1) { - if (notify_on_release(cgrp)) { - set_bit(CGRP_RELEASABLE, &cgrp->flags); - check_for_release(cgrp); - } + check_for_release(cgrp); cgroup_wakeup_rmdir_waiter(cgrp); } rcu_read_unlock(); -- cgit v1.2.3 From 55488ed10abc42e33f3997c3860e52d4dbc170af Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 23 Nov 2010 21:37:04 -0800 Subject: cgroup: Remove call to synchronize_rcu in cgroup_attach_task synchronize_rcu can be very expensive, averaging 100 ms in some cases. 
In cgroup_attach_task, it is used to prevent a task->cgroups pointer dereferenced in an RCU read side critical section from being invalidated, by delaying the call to put_css_set until after an RCU grace period. To avoid the call to synchronize_rcu, make the put_css_set call rcu-safe by moving the deletion of the css_set links into free_css_set_work, scheduled by the rcu callback free_css_set_rcu. The decrement of the cgroup refcount is no longer synchronous with the call to put_css_set, which can result in the cgroup refcount staying positive after the last call to cgroup_attach_task returns. To allow the cgroup to be deleted with cgroup_rmdir synchronously after cgroup_attach_task, have rmdir check the refcount of all associated css_sets. If cgroup_rmdir is called on a cgroup for which the css_sets all have refcount zero but the cgroup refcount is nonzero, reuse the rmdir waitqueue to block the rmdir until free_css_set_work is called. Signed-off-by: Colin Cross --- kernel/cgroup.c | 134 ++++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 86 insertions(+), 48 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 9e853b6852f0..58e9d298165d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -271,6 +271,33 @@ static void cgroup_release_agent(struct work_struct *work); static DECLARE_WORK(release_agent_work, cgroup_release_agent); static void check_for_release(struct cgroup *cgrp); +/* + * A queue for waiters to do rmdir() cgroup. A tasks will sleep when + * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some + * reference to css->refcnt. In general, this refcnt is expected to goes down + * to zero, soon. 
+ * + * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex; + */ +DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); + +static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp) +{ + if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) + wake_up_all(&cgroup_rmdir_waitq); +} + +void cgroup_exclude_rmdir(struct cgroup_subsys_state *css) +{ + css_get(css); +} + +void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css) +{ + cgroup_wakeup_rmdir_waiter(css->cgroup); + css_put(css); +} + /* Link structure for associating css_set objects with cgroups */ struct cg_cgroup_link { /* @@ -330,6 +357,37 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) return &css_set_table[index]; } +static void free_css_set_work(struct work_struct *work) +{ + struct css_set *cg = container_of(work, struct css_set, work); + struct cg_cgroup_link *link; + struct cg_cgroup_link *saved_link; + + write_lock(&css_set_lock); + list_for_each_entry_safe(link, saved_link, &cg->cg_links, + cg_link_list) { + struct cgroup *cgrp = link->cgrp; + list_del(&link->cg_link_list); + list_del(&link->cgrp_link_list); + if (atomic_dec_and_test(&cgrp->count)) { + check_for_release(cgrp); + cgroup_wakeup_rmdir_waiter(cgrp); + } + kfree(link); + } + write_unlock(&css_set_lock); + + kfree(cg); +} + +static void free_css_set_rcu(struct rcu_head *obj) +{ + struct css_set *cg = container_of(obj, struct css_set, rcu_head); + + INIT_WORK(&cg->work, free_css_set_work); + schedule_work(&cg->work); +} + /* We don't maintain the lists running through each css_set to its * task until after the first call to cgroup_iter_start(). This * reduces the fork()/exit() overhead for people who have cgroups @@ -346,8 +404,6 @@ static inline void get_css_set(struct css_set *cg) static void put_css_set(struct css_set *cg) { - struct cg_cgroup_link *link; - struct cg_cgroup_link *saved_link; /* * Ensure that the refcount doesn't hit zero while any readers * can see it. 
Similar to atomic_dec_and_lock(), but for an @@ -361,23 +417,11 @@ static void put_css_set(struct css_set *cg) return; } - /* This css_set is dead. unlink it and release cgroup refcounts */ hlist_del(&cg->hlist); css_set_count--; - list_for_each_entry_safe(link, saved_link, &cg->cg_links, - cg_link_list) { - struct cgroup *cgrp = link->cgrp; - list_del(&link->cg_link_list); - list_del(&link->cgrp_link_list); - if (atomic_dec_and_test(&cgrp->count)) - check_for_release(cgrp); - - kfree(link); - } - write_unlock(&css_set_lock); - kfree_rcu(cg, rcu_head); + call_rcu(&cg->rcu_head, free_css_set_rcu); } /* @@ -709,9 +753,9 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task, * cgroup_attach_task(), which overwrites one tasks cgroup pointer with * another. It does so using cgroup_mutex, however there are * several performance critical places that need to reference - * task->cgroup without the expense of grabbing a system global + * task->cgroups without the expense of grabbing a system global * mutex. Therefore except as noted below, when dereferencing or, as - * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use + * in cgroup_attach_task(), modifying a task's cgroups pointer we use * task_lock(), which acts on a spinlock (task->alloc_lock) already in * the task_struct routinely used for such matters. * @@ -900,33 +944,6 @@ static void cgroup_d_remove_dir(struct dentry *dentry) remove_dir(dentry); } -/* - * A queue for waiters to do rmdir() cgroup. A tasks will sleep when - * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some - * reference to css->refcnt. In general, this refcnt is expected to goes down - * to zero, soon. 
- * - * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex; - */ -DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); - -static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp) -{ - if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) - wake_up_all(&cgroup_rmdir_waitq); -} - -void cgroup_exclude_rmdir(struct cgroup_subsys_state *css) -{ - css_get(css); -} - -void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css) -{ - cgroup_wakeup_rmdir_waiter(css->cgroup); - css_put(css); -} - /* * Call with cgroup_mutex held. Drops reference counts on modules, including * any duplicate ones that parse_cgroupfs_options took. If this function @@ -1810,6 +1827,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) struct cgroup_subsys *ss, *failed_ss = NULL; struct cgroup *oldcgrp; struct cgroupfs_root *root = cgrp->root; + struct css_set *cg; /* Nothing to do if the task is already in that cgroup */ oldcgrp = task_cgroup_from_root(tsk, root); @@ -1848,6 +1866,11 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) } } + task_lock(tsk); + cg = tsk->cgroups; + get_css_set(cg); + task_unlock(tsk); + retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false); if (retval) goto out; @@ -1860,9 +1883,9 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) if (ss->attach) ss->attach(ss, cgrp, oldcgrp, tsk); } - set_bit(CGRP_RELEASABLE, &cgrp->flags); - synchronize_rcu(); + /* put_css_set will not destroy cg until after an RCU grace period */ + put_css_set(cg); /* * wake up rmdir() waiter. the rmdir should fail since the cgroup @@ -3944,6 +3967,21 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp) return !failed; } +/* checks if all of the css_sets attached to a cgroup have a refcount of 0. 
+ * Must be called with css_set_lock held */ +static int cgroup_css_sets_empty(struct cgroup *cgrp) +{ + struct cg_cgroup_link *link; + + list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) { + struct css_set *cg = link->cg; + if (atomic_read(&cg->refcount) > 0) + return 0; + } + + return 1; +} + static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) { struct cgroup *cgrp = dentry->d_fsdata; @@ -3956,7 +3994,7 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) /* the vfs holds both inode->i_mutex already */ again: mutex_lock(&cgroup_mutex); - if (atomic_read(&cgrp->count) != 0) { + if (!cgroup_css_sets_empty(cgrp)) { mutex_unlock(&cgroup_mutex); return -EBUSY; } @@ -3989,7 +4027,7 @@ again: mutex_lock(&cgroup_mutex); parent = cgrp->parent; - if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { + if (!cgroup_css_sets_empty(cgrp) || !list_empty(&cgrp->children)) { clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); mutex_unlock(&cgroup_mutex); return -EBUSY; -- cgit v1.2.3 From 101fca7c95b1c1e7d58148784ed7dd70b5c5cc02 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Mon, 24 Jan 2011 14:36:39 -0800 Subject: Print pending wakeup IRQ preventing suspend to dmesg Change-Id: I36f90735c75fb7c7ab1084775ec0d0ab02336e6e Signed-off-by: Todd Poynor --- kernel/irq/pm.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 15e53b1766a6..1f455f36fba3 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -104,8 +104,11 @@ int check_wakeup_irqs(void) for_each_irq_desc(irq, desc) { if (irqd_is_wakeup_set(&desc->irq_data)) { - if (desc->istate & IRQS_PENDING) + if (desc->istate & IRQS_PENDING) { + pr_info("Wakeup IRQ %d %s pending, suspend aborted\n", + irq, desc->name ? 
desc->name : ""); return -EBUSY; + } continue; } /* -- cgit v1.2.3 From c8ccd9e6e6a3921a1d616fb9eca8f610cf648b01 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Wed, 30 Mar 2011 12:37:49 -0700 Subject: power: wakelock: use get_xtime_and_monotonic_and_sleep_offset in get_expired_time Change-Id: I6ebe6b954b2ff328d46898d683650dafb9c4fe74 Signed-off-by: Erik Gilling --- kernel/power/wakelock.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index ee9781c5adb2..c10d0ee7907e 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -60,19 +60,15 @@ int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) struct timespec kt; struct timespec tomono; struct timespec delta; - unsigned long seq; + struct timespec sleep; long timeout; if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) return 0; - do { - seq = read_seqbegin(&xtime_lock); - timeout = lock->expires - jiffies; - if (timeout > 0) - return 0; - kt = current_kernel_time(); - tomono = __get_wall_to_monotonic(); - } while (read_seqretry(&xtime_lock, seq)); + get_xtime_and_monotonic_and_sleep_offset(&kt, &tomono, &sleep); + timeout = lock->expires - jiffies; + if (timeout > 0) + return 0; jiffies_to_timespec(-timeout, &delta); set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); -- cgit v1.2.3 From e19707ff2e38d4270db529e46129acf0d3ec2939 Mon Sep 17 00:00:00 2001 From: mhban Date: Wed, 8 Jun 2011 09:13:15 +0000 Subject: printk: do not handle non-sleepable notification in console_cpu_notify - CPU_DYING should not be handled in sleepable context Signed-off-by: Minho Ban --- kernel/printk.c | 1 - 1 file changed, 1 deletion(-) (limited to 'kernel') diff --git a/kernel/printk.c b/kernel/printk.c index beff95d3bcb8..1baace7d8674 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1205,7 +1205,6 @@ static int __cpuinit 
console_cpu_notify(struct notifier_block *self, switch (action) { case CPU_ONLINE: case CPU_DEAD: - case CPU_DYING: case CPU_DOWN_FAILED: case CPU_UP_CANCELED: console_lock(); -- cgit v1.2.3 From 8b5682907f27c1bad73ee5ec9584a338ea9cd0be Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 14 Jun 2011 11:36:39 -0700 Subject: HACK: time: Disable alarmtimer kernel/time/alarmtimer.c conflicts with drivers/rtc/alarm.c, disable it for now. Change-Id: I6cdb3b885828d45836a54971adf16143039b0a0e Signed-off-by: Colin Cross --- kernel/time/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/time/Makefile b/kernel/time/Makefile index e2fd74b8e8c2..cae2ad7491b0 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -1,5 +1,5 @@ obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o -obj-y += timeconv.o posix-clock.o alarmtimer.o +obj-y += timeconv.o posix-clock.o #alarmtimer.o obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o -- cgit v1.2.3 From f971f6c015d74c9653a15fa1357144ead2ff7e7e Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 12 Jul 2011 20:12:45 -0700 Subject: Revert "cgroup: Add generic cgroup subsystem permission checks." This reverts commit 1d38bc7d0523af2233b4280e2aeab34c6a076665. 
Change-Id: I2c5066b696cbdd5ca117ed74718bcb7e70e878e7 Signed-off-by: Colin Cross --- kernel/cgroup.c | 10 ---------- kernel/cgroup_freezer.c | 8 -------- kernel/cpuset.c | 7 ------- kernel/sched.c | 9 --------- 4 files changed, 34 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 58e9d298165d..84dd3818633f 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -60,7 +60,6 @@ #include #include #include /* used in cgroup_attach_proc */ -#include #include @@ -1847,15 +1846,6 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) failed_ss = ss; goto out; } - } else if (!capable(CAP_SYS_ADMIN)) { - const struct cred *cred = current_cred(), *tcred; - - /* No can_attach() - check perms generically */ - tcred = __task_cred(tsk); - if (cred->euid != tcred->uid && - cred->euid != tcred->suid) { - return -EACCES; - } } if (ss->can_attach_task) { retval = ss->can_attach_task(cgrp, tsk); diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index 6ebda1df9b70..e691818d7e45 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c @@ -164,14 +164,6 @@ static int freezer_can_attach(struct cgroup_subsys *ss, { struct freezer *freezer; - if ((current != task) && (!capable(CAP_SYS_ADMIN))) { - const struct cred *cred = current_cred(), *tcred; - - tcred = __task_cred(task); - if (cred->euid != tcred->uid && cred->euid != tcred->suid) - return -EPERM; - } - /* * Anything frozen can't move or be moved to/from. 
*/ diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 3b2305163da4..10131fdaff70 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -1373,13 +1373,6 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, { struct cpuset *cs = cgroup_cs(cont); - if ((current != task) && (!capable(CAP_SYS_ADMIN))) { - const struct cred *cred = current_cred(), *tcred; - - if (cred->euid != tcred->uid && cred->euid != tcred->suid) - return -EPERM; - } - if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) return -ENOSPC; diff --git a/kernel/sched.c b/kernel/sched.c index 27ee31e5673e..7de220e79dff 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8968,15 +8968,6 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) static int cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { - if ((current != tsk) && (!capable(CAP_SYS_NICE))) { - const struct cred *cred = current_cred(), *tcred; - - tcred = __task_cred(tsk); - - if (cred->euid != tcred->uid && cred->euid != tcred->suid) - return -EPERM; - } - #ifdef CONFIG_RT_GROUP_SCHED if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk)) return -EINVAL; -- cgit v1.2.3 From a237ee7830a0445a27af7fd24bdc3414b9553d35 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 12 Jul 2011 19:53:24 -0700 Subject: cgroup: Add generic cgroup subsystem permission checks Rather than using explicit euid == 0 checks when trying to move tasks into a cgroup via CFS, move permission checks into each specific cgroup subsystem. If a subsystem does not specify a 'allow_attach' handler, then we fall back to doing our checks the old way. Use the 'allow_attach' handler for the 'cpu' cgroup to allow non-root processes to add arbitrary processes to a 'cpu' cgroup if it has the CAP_SYS_NICE capability set. This version of the patch adds a 'allow_attach' handler instead of reusing the 'can_attach' handler. 
If the 'can_attach' handler is reused, a new cgroup that implements 'can_attach' but not the permission checks could end up with no permission checks at all. Change-Id: Icfa950aa9321d1ceba362061d32dc7dfa2c64f0c Original-Author: San Mehat Signed-off-by: Colin Cross --- kernel/cgroup.c | 31 ++++++++++++++++++++++++++++--- kernel/sched.c | 15 +++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 84dd3818633f..bab5a7911b84 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2202,6 +2202,24 @@ out_free_group_list: return retval; } +static int cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk) +{ + struct cgroup_subsys *ss; + int ret; + + for_each_subsys(cgrp->root, ss) { + if (ss->allow_attach) { + ret = ss->allow_attach(cgrp, tsk); + if (ret) + return ret; + } else { + return -EACCES; + } + } + + return 0; +} + /* * Find the task_struct of the task to attach by vpid and pass it along to the * function to attach either it or all tasks in its threadgroup. 
Will take @@ -2247,9 +2265,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) if (cred->euid && cred->euid != tcred->uid && cred->euid != tcred->suid) { - rcu_read_unlock(); - cgroup_unlock(); - return -EACCES; + /* + * if the default permission check fails, give each + * cgroup a chance to extend the permission check + */ + ret = cgroup_allow_attach(cgrp, tsk); + if (ret) { + rcu_read_unlock(); + cgroup_unlock(); + return ret; + } } get_task_struct(tsk); rcu_read_unlock(); diff --git a/kernel/sched.c b/kernel/sched.c index 7de220e79dff..e7ec90424ded 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -8965,6 +8965,20 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) sched_destroy_group(tg); } +static int +cpu_cgroup_allow_attach(struct cgroup *cgrp, struct task_struct *tsk) +{ + const struct cred *cred = current_cred(), *tcred; + + tcred = __task_cred(tsk); + + if ((current != tsk) && !capable(CAP_SYS_NICE) && + cred->euid != tcred->uid && cred->euid != tcred->suid) + return -EACCES; + + return 0; +} + static int cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { @@ -9070,6 +9084,7 @@ struct cgroup_subsys cpu_cgroup_subsys = { .name = "cpu", .create = cpu_cgroup_create, .destroy = cpu_cgroup_destroy, + .allow_attach = cpu_cgroup_allow_attach, .can_attach_task = cpu_cgroup_can_attach_task, .attach_task = cpu_cgroup_attach_task, .exit = cpu_cgroup_exit, -- cgit v1.2.3 From f65de758b3d84a085bfe3fc769c4650bcf62ffa4 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Wed, 15 Jun 2011 17:21:57 -0700 Subject: Move x86_64 idle notifiers to generic Move the x86_64 idle notifiers originally by Andi Kleen and Venkatesh Pallipadi to generic. 
Change-Id: Idf29cda15be151f494ff245933c12462643388d5 Acked-by: Nicolas Pitre Signed-off-by: Todd Poynor --- kernel/cpu.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'kernel') diff --git a/kernel/cpu.c b/kernel/cpu.c index 12b7458f23b1..404770761a4e 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -594,3 +594,23 @@ void init_cpu_online(const struct cpumask *src) { cpumask_copy(to_cpumask(cpu_online_bits), src); } + +static ATOMIC_NOTIFIER_HEAD(idle_notifier); + +void idle_notifier_register(struct notifier_block *n) +{ + atomic_notifier_chain_register(&idle_notifier, n); +} +EXPORT_SYMBOL_GPL(idle_notifier_register); + +void idle_notifier_unregister(struct notifier_block *n) +{ + atomic_notifier_chain_unregister(&idle_notifier, n); +} +EXPORT_SYMBOL_GPL(idle_notifier_unregister); + +void idle_notifier_call_chain(unsigned long val) +{ + atomic_notifier_call_chain(&idle_notifier, val, NULL); +} +EXPORT_SYMBOL_GPL(idle_notifier_call_chain); -- cgit v1.2.3 From f5c6caac3f40ef6d14ed15f886838336ca5b213c Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Tue, 19 Jul 2011 12:33:45 -0700 Subject: power: Add option to log time spent in suspend Prints the time spent in suspend in the kernel log, and keeps statistics on the time spent in suspend in /sys/kernel/debug/suspend_time Change-Id: Ia6b9ebe4baa0f7f5cd211c6a4f7e813aefd3fa1d Signed-off-by: Colin Cross Signed-off-by: Todd Poynor --- kernel/power/Kconfig | 7 +++ kernel/power/Makefile | 1 + kernel/power/suspend_time.c | 111 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+) create mode 100644 kernel/power/suspend_time.c (limited to 'kernel') diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 3a27c53a0508..fcf5a834c4ec 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -302,3 +302,10 @@ config PM_GENERIC_DOMAINS config PM_GENERIC_DOMAINS_RUNTIME def_bool y depends on PM_RUNTIME && PM_GENERIC_DOMAINS + +config SUSPEND_TIME + bool "Log time spent in 
suspend" + ---help--- + Prints the time spent in suspend in the kernel log, and + keeps statistics on the time spent in suspend in + /sys/kernel/debug/suspend_time diff --git a/kernel/power/Makefile b/kernel/power/Makefile index 493f19d2a293..9b224e16b191 100644 --- a/kernel/power/Makefile +++ b/kernel/power/Makefile @@ -13,5 +13,6 @@ obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o +obj-$(CONFIG_SUSPEND_TIME) += suspend_time.o obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o diff --git a/kernel/power/suspend_time.c b/kernel/power/suspend_time.c new file mode 100644 index 000000000000..d2a65da9f22c --- /dev/null +++ b/kernel/power/suspend_time.c @@ -0,0 +1,111 @@ +/* + * debugfs file to track time spent in suspend + * + * Copyright (c) 2011, Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct timespec suspend_time_before; +static unsigned int time_in_suspend_bins[32]; + +#ifdef CONFIG_DEBUG_FS +static int suspend_time_debug_show(struct seq_file *s, void *data) +{ + int bin; + seq_printf(s, "time (secs) count\n"); + seq_printf(s, "------------------\n"); + for (bin = 0; bin < 32; bin++) { + if (time_in_suspend_bins[bin] == 0) + continue; + seq_printf(s, "%4d - %4d %4u\n", + bin ? 
1 << (bin - 1) : 0, 1 << bin, + time_in_suspend_bins[bin]); + } + return 0; +} + +static int suspend_time_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, suspend_time_debug_show, NULL); +} + +static const struct file_operations suspend_time_debug_fops = { + .open = suspend_time_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init suspend_time_debug_init(void) +{ + struct dentry *d; + + d = debugfs_create_file("suspend_time", 0755, NULL, NULL, + &suspend_time_debug_fops); + if (!d) { + pr_err("Failed to create suspend_time debug file\n"); + return -ENOMEM; + } + + return 0; +} + +late_initcall(suspend_time_debug_init); +#endif + +static int suspend_time_syscore_suspend(void) +{ + read_persistent_clock(&suspend_time_before); + + return 0; +} + +static void suspend_time_syscore_resume(void) +{ + struct timespec after; + + read_persistent_clock(&after); + + after = timespec_sub(after, suspend_time_before); + + time_in_suspend_bins[fls(after.tv_sec)]++; + + pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec, + after.tv_nsec / NSEC_PER_MSEC); +} + +static struct syscore_ops suspend_time_syscore_ops = { + .suspend = suspend_time_syscore_suspend, + .resume = suspend_time_syscore_resume, +}; + +static int suspend_time_syscore_init(void) +{ + register_syscore_ops(&suspend_time_syscore_ops); + + return 0; +} + +static void suspend_time_syscore_exit(void) +{ + unregister_syscore_ops(&suspend_time_syscore_ops); +} +module_init(suspend_time_syscore_init); +module_exit(suspend_time_syscore_exit); -- cgit v1.2.3 From ddea761d060d85fd4ecac1f522fa2f64ec16ce00 Mon Sep 17 00:00:00 2001 From: Erik Gilling Date: Thu, 21 Jul 2011 14:07:45 -0700 Subject: earlysuspend: add verbose debug flag when enabled, prints out the function of each handler as they are called Change-Id: I5ed251867e0e3aa3cd05f030ff3579808cedd0c2 Signed-off-by: Erik Gilling --- kernel/power/earlysuspend.c | 15 ++++++++++++--- 
1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'kernel') diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c index 84bed51dcdce..b15f02eba45c 100644 --- a/kernel/power/earlysuspend.c +++ b/kernel/power/earlysuspend.c @@ -26,6 +26,7 @@ enum { DEBUG_USER_STATE = 1U << 0, DEBUG_SUSPEND = 1U << 2, + DEBUG_VERBOSE = 1U << 3, }; static int debug_mask = DEBUG_USER_STATE; module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); @@ -94,8 +95,11 @@ static void early_suspend(struct work_struct *work) if (debug_mask & DEBUG_SUSPEND) pr_info("early_suspend: call handlers\n"); list_for_each_entry(pos, &early_suspend_handlers, link) { - if (pos->suspend != NULL) + if (pos->suspend != NULL) { + if (debug_mask & DEBUG_VERBOSE) + pr_info("early_suspend: calling %pf\n", pos->suspend); pos->suspend(pos); + } } mutex_unlock(&early_suspend_lock); @@ -131,9 +135,14 @@ static void late_resume(struct work_struct *work) } if (debug_mask & DEBUG_SUSPEND) pr_info("late_resume: call handlers\n"); - list_for_each_entry_reverse(pos, &early_suspend_handlers, link) - if (pos->resume != NULL) + list_for_each_entry_reverse(pos, &early_suspend_handlers, link) { + if (pos->resume != NULL) { + if (debug_mask & DEBUG_VERBOSE) + pr_info("late_resume: calling %pf\n", pos->resume); + pos->resume(pos); + } + } if (debug_mask & DEBUG_SUSPEND) pr_info("late_resume: done\n"); abort: -- cgit v1.2.3 From c7f4f9b136a58702248f08497cf31f06e8078e76 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Mon, 25 Jul 2011 14:54:03 -0700 Subject: PM: Fix printing IRQ names for pending wakeup IRQs The IRQ name has moved to the struct irqaction list (so print first action's name). 
Change-Id: I65a627457f9abaf7c1dcc32d8814243ba2ff4717 Signed-off-by: Todd Poynor --- kernel/irq/pm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 1f455f36fba3..fe4b09cf829c 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c @@ -106,7 +106,9 @@ int check_wakeup_irqs(void) if (irqd_is_wakeup_set(&desc->irq_data)) { if (desc->istate & IRQS_PENDING) { pr_info("Wakeup IRQ %d %s pending, suspend aborted\n", - irq, desc->name ? desc->name : ""); + irq, + desc->action && desc->action->name ? + desc->action->name : ""); return -EBUSY; } continue; -- cgit v1.2.3 From 2b2f237feabf4f162525c77e597e088bdd41c632 Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Mon, 8 Aug 2011 17:26:49 -0700 Subject: PM: wakelocks: Don't report wake up wakelock if suspend aborted If the wakelock driver aborts suspend due to an already-held wakelock, don't report the next wakelock held as the "wake up wakelock". Change-Id: I582ffbb87a3c361739a77d839a0c62921cff11a6 Signed-off-by: Todd Poynor --- kernel/power/wakelock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index c10d0ee7907e..d45df2b151b7 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -311,7 +311,7 @@ static int power_suspend_late(struct device *dev) { int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0; #ifdef CONFIG_WAKELOCK_STAT - wait_for_wakeup = 1; + wait_for_wakeup = !ret; #endif if (debug_mask & DEBUG_SUSPEND) pr_info("power_suspend_late return %d\n", ret); -- cgit v1.2.3 From 87ca07903f6dcc2d306a4bff69d0e49abc8d4b6e Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Mon, 8 Aug 2011 16:06:54 -0700 Subject: PM: wakelocks: Display wakelocks preventing suspend by default Use DEBUG_WAKEUP flag to show wakelocks that abort suspend, in addition to showing wakelocks held during system resume. DEBUG_WAKEUP is enabled by default. 
Change-Id: If6fa68e8afbc482a5300ffab2964694b02b34f41 Signed-off-by: Todd Poynor --- kernel/power/wakelock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index d45df2b151b7..2ee459fe4456 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -249,7 +249,7 @@ long has_wake_lock(int type) unsigned long irqflags; spin_lock_irqsave(&list_lock, irqflags); ret = has_wake_lock_locked(type); - if (ret && (debug_mask & DEBUG_SUSPEND) && type == WAKE_LOCK_SUSPEND) + if (ret && (debug_mask & DEBUG_WAKEUP) && type == WAKE_LOCK_SUSPEND) print_active_locks(type); spin_unlock_irqrestore(&list_lock, irqflags); return ret; -- cgit v1.2.3 From df760f56dfdb8ece4d43f79afa3f6d4d32a1baa9 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Wed, 17 Aug 2011 18:31:58 -0500 Subject: panic: Add board ID to panic output At times, it is necessary for boards to provide some additional information as part of panic logs. Provide information on the board hardware as part of panic logs. It is safer to print this information at the very end in case something bad happens as part of the information retrieval itself. To use this, set global mach_panic_string to an appropriate string in the board file. 
Change-Id: Id12cdda87b0cd2940dd01d52db97e6162f671b4d Signed-off-by: Nishanth Menon --- kernel/panic.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'kernel') diff --git a/kernel/panic.c b/kernel/panic.c index cdb86b128328..41fc78ea3db9 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -27,6 +27,9 @@ #define PANIC_TIMER_STEP 100 #define PANIC_BLINK_SPD 18 +/* Machine specific panic information string */ +char *mach_panic_string; + int panic_on_oops; static unsigned long tainted_mask; static int pause_on_oops; @@ -347,6 +350,11 @@ late_initcall(init_oops_id); void print_oops_end_marker(void) { init_oops_id(); + + if (mach_panic_string) + printk(KERN_WARNING "Board Information: %s\n", + mach_panic_string); + printk(KERN_WARNING "---[ end trace %016llx ]---\n", (unsigned long long)oops_id); } -- cgit v1.2.3 From dfd3a1b588094da9140cdfb80cf9005bfe41b99e Mon Sep 17 00:00:00 2001 From: Todd Poynor Date: Thu, 25 Aug 2011 19:29:45 -0700 Subject: PM: Backoff suspend if repeated attempts fail Change-Id: I32289676d95a307ea3aa5e78f6c126ca979c0fec Signed-off-by: Todd Poynor --- kernel/power/wakelock.c | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) (limited to 'kernel') diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c index 2ee459fe4456..81e1b7c65ca1 100644 --- a/kernel/power/wakelock.c +++ b/kernel/power/wakelock.c @@ -48,6 +48,12 @@ struct workqueue_struct *suspend_work_queue; struct wake_lock main_wake_lock; suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; static struct wake_lock unknown_wakeup; +static struct wake_lock suspend_backoff_lock; + +#define SUSPEND_BACKOFF_THRESHOLD 10 +#define SUSPEND_BACKOFF_INTERVAL 10000 + +static unsigned suspend_short_count; #ifdef CONFIG_WAKELOCK_STAT static struct wake_lock deleted_wake_locks; @@ -255,10 +261,18 @@ long has_wake_lock(int type) return ret; } +static void suspend_backoff(void) +{ + pr_info("suspend: too many immediate wakeups, back off\n"); + 
wake_lock_timeout(&suspend_backoff_lock, + msecs_to_jiffies(SUSPEND_BACKOFF_INTERVAL)); +} + static void suspend(struct work_struct *work) { int ret; int entry_event_num; + struct timespec ts_entry, ts_exit; if (has_wake_lock(WAKE_LOCK_SUSPEND)) { if (debug_mask & DEBUG_SUSPEND) @@ -270,17 +284,30 @@ static void suspend(struct work_struct *work) sys_sync(); if (debug_mask & DEBUG_SUSPEND) pr_info("suspend: enter suspend\n"); + getnstimeofday(&ts_entry); ret = pm_suspend(requested_suspend_state); + getnstimeofday(&ts_exit); + if (debug_mask & DEBUG_EXIT_SUSPEND) { - struct timespec ts; struct rtc_time tm; - getnstimeofday(&ts); - rtc_time_to_tm(ts.tv_sec, &tm); + rtc_time_to_tm(ts_exit.tv_sec, &tm); pr_info("suspend: exit suspend, ret = %d " "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, - tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); + tm.tm_hour, tm.tm_min, tm.tm_sec, ts_exit.tv_nsec); } + + if (ts_exit.tv_sec - ts_entry.tv_sec <= 1) { + ++suspend_short_count; + + if (suspend_short_count == SUSPEND_BACKOFF_THRESHOLD) { + suspend_backoff(); + suspend_short_count = 0; + } + } else { + suspend_short_count = 0; + } + if (current_event_num == entry_event_num) { if (debug_mask & DEBUG_SUSPEND) pr_info("suspend: pm_suspend returned with no event\n"); @@ -547,6 +574,8 @@ static int __init wakelocks_init(void) wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); wake_lock(&main_wake_lock); wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); + wake_lock_init(&suspend_backoff_lock, WAKE_LOCK_SUSPEND, + "suspend_backoff"); ret = platform_device_register(&power_device); if (ret) { @@ -576,6 +605,7 @@ err_suspend_work_queue: err_platform_driver_register: platform_device_unregister(&power_device); err_platform_device_register: + wake_lock_destroy(&suspend_backoff_lock); wake_lock_destroy(&unknown_wakeup); wake_lock_destroy(&main_wake_lock); #ifdef CONFIG_WAKELOCK_STAT @@ -592,6 +622,7 @@ static void 
__exit wakelocks_exit(void) destroy_workqueue(suspend_work_queue); platform_driver_unregister(&power_driver); platform_device_unregister(&power_device); + wake_lock_destroy(&suspend_backoff_lock); wake_lock_destroy(&unknown_wakeup); wake_lock_destroy(&main_wake_lock); #ifdef CONFIG_WAKELOCK_STAT -- cgit v1.2.3