Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/auditsc.c          |  2
-rw-r--r-- | kernel/debug/debug_core.c |  5
-rw-r--r-- | kernel/debug/debug_core.h |  2
-rw-r--r-- | kernel/hung_task.c        |  6
-rw-r--r-- | kernel/irq/Kconfig        |  1
-rw-r--r-- | kernel/kmod.c             |  2
-rw-r--r-- | kernel/sched/core.c       |  6
-rw-r--r-- | kernel/sched/deadline.c   |  3
-rw-r--r-- | kernel/smp.c              | 68
-rw-r--r-- | kernel/softirq.c          |  2
-rw-r--r-- | kernel/sysctl.c           |  7
-rw-r--r-- | kernel/time/tick-sched.c  | 27
12 files changed, 50 insertions, 81 deletions
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
 	struct audit_context *context = current->audit_context;
 	BUG_ON(!context);
-	if (!context->in_syscall) {
+	if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
 	printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
 	       __FILE__, __LINE__, context->serial, name);
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 7d2f35e5df2f..334b3980ffc1 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -736,7 +736,8 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
-int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
+int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
+		   atomic_t *send_ready)
 {
 #ifdef CONFIG_SMP
 	if (!kgdb_io_ready(0) || !send_ready)
@@ -750,7 +751,7 @@ int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
 	ks->cpu = cpu;
 	ks->ex_vector = trapnr;
 	ks->signo = SIGTRAP;
-	ks->err_code = KGDB_KDB_REASON_SYSTEM_NMI;
+	ks->err_code = err_code;
 	ks->linux_regs = regs;
 	ks->send_ready = send_ready;
 	kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 572aa4f5677c..127d9bc49fb4 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -75,13 +75,11 @@ extern int kdb_stub(struct kgdb_state *ks);
 extern int kdb_parse(const char *cmdstr);
 extern int kdb_common_init_state(struct kgdb_state *ks);
 extern int kdb_common_deinit_state(void);
-#define KGDB_KDB_REASON_SYSTEM_NMI KDB_REASON_SYSTEM_NMI
 #else /* ! CONFIG_KGDB_KDB */
 static inline int kdb_stub(struct kgdb_state *ks)
 {
 	return DBG_PASS_EVENT;
 }
-#define KGDB_KDB_REASON_SYSTEM_NMI 0
 #endif /* CONFIG_KGDB_KDB */
 #endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9328b80eaf14..0b9c169d577f 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -37,7 +37,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
-unsigned long __read_mostly sysctl_hung_task_warnings = 10;
+int __read_mostly sysctl_hung_task_warnings = 10;
 static int __read_mostly did_panic;
@@ -98,7 +98,9 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 	if (!sysctl_hung_task_warnings)
 		return;
-	sysctl_hung_task_warnings--;
+
+	if (sysctl_hung_task_warnings > 0)
+		sysctl_hung_task_warnings--;
 	/*
 	 * Ok, the task did not get scheduled for more than 2 minutes,
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
 # Generic configurable interrupt chip implementation
 config GENERIC_IRQ_CHIP
 	bool
+	select IRQ_DOMAIN
 # Generic irq_domain hw <--> linux irq number translation
 config IRQ_DOMAIN
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
 	commit_creds(new);
-	retval = do_execve(sub_info->path,
+	retval = do_execve(getname_kernel(sub_info->path),
 			   (const char __user *const __user *)sub_info->argv,
 			   (const char __user *const __user *)sub_info->envp);
 	if (!retval)
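The kernel/hung_task.c hunk above makes sysctl_hung_task_warnings a signed int and only decrements it while it is positive, so zero silences further reports and a negative value (the neg_one bound added to kernel/sysctl.c further down) effectively means "warn forever". Below is a minimal userspace sketch of that counting policy, not kernel code; the names warn_budget and report_hung_task are invented for illustration.

#include <stdio.h>

/* Hypothetical stand-in for sysctl_hung_task_warnings:
 * >0 = remaining warnings, 0 = muted, -1 = unlimited. */
static int warn_budget = 2;

static void report_hung_task(const char *comm)
{
	if (!warn_budget)		/* budget exhausted: stay silent */
		return;
	if (warn_budget > 0)		/* only count down positive budgets */
		warn_budget--;
	printf("INFO: task %s blocked for more than 120 seconds.\n", comm);
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		report_hung_task("worker");	/* prints twice, then goes quiet */

	warn_budget = -1;			/* never decremented: always warns */
	for (int i = 0; i < 4; i++)
		report_hung_task("worker");	/* prints four more times */
	return 0;
}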
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7fea865a810d..b46131ef6aab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2476,7 +2476,7 @@ u64 scheduler_tick_max_deferment(void)
 	if (time_before_eq(next, now))
 		return 0;
-	return jiffies_to_usecs(next - now) * NSEC_PER_USEC;
+	return jiffies_to_nsecs(next - now);
 }
 #endif
@@ -4347,7 +4347,9 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 		goto out_unlock;
 	rq = task_rq_lock(p, &flags);
-	time_slice = p->sched_class->get_rr_interval(rq, p);
+	time_slice = 0;
+	if (p->sched_class->get_rr_interval)
+		time_slice = p->sched_class->get_rr_interval(rq, p);
 	task_rq_unlock(rq, p, &flags);
 	rcu_read_unlock();
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0de248202879..0dd5e0971a07 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -351,7 +351,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
  * disrupting the schedulability of the system. Otherwise, we should
  * refill the runtime and set the deadline a period in the future,
  * because keeping the current (absolute) deadline of the task would
- * result in breaking guarantees promised to other tasks.
+ * result in breaking guarantees promised to other tasks (refer to
+ * Documentation/scheduler/sched-deadline.txt for more informations).
  *
  * This function returns true if:
 *
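The sched_rr_get_interval() hunk above adds a NULL check because the get_rr_interval hook is optional and not every scheduling class provides it (SCHED_DEADLINE, added alongside kernel/sched/deadline.c, has no round-robin timeslice), so the syscall now reports 0 instead of dereferencing a NULL function pointer. Below is a small userspace sketch of the same optional-callback pattern; struct sched_ops, rr_interval and query_rr_interval are made-up names, not kernel interfaces.

#include <stdio.h>

/* Made-up method table: the rr_interval hook may legitimately be NULL. */
struct sched_ops {
	const char *name;
	unsigned int (*rr_interval)(void);
};

static unsigned int fair_rr_interval(void) { return 100; }

static const struct sched_ops fair_ops     = { "fair", fair_rr_interval };
static const struct sched_ops deadline_ops = { "deadline", NULL };	/* no RR slice */

static unsigned int query_rr_interval(const struct sched_ops *ops)
{
	unsigned int slice = 0;		/* default when the hook is absent */

	if (ops->rr_interval)		/* mirror the NULL check added above */
		slice = ops->rr_interval();
	return slice;
}

int main(void)
{
	printf("%s: %u ms\n", fair_ops.name, query_rr_interval(&fair_ops));
	printf("%s: %u ms\n", deadline_ops.name, query_rr_interval(&deadline_ops));
	return 0;
}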
diff --git a/kernel/smp.c b/kernel/smp.c
index bd9f94028838..ffee35bef179 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -23,17 +23,11 @@ enum {
 struct call_function_data {
 	struct call_single_data __percpu *csd;
 	cpumask_var_t cpumask;
-	cpumask_var_t cpumask_ipi;
 };
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
-struct call_single_queue {
-	struct list_head list;
-	raw_spinlock_t lock;
-};
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
@@ -47,14 +41,8 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, cpu_to_node(cpu)))
 		return notifier_from_errno(-ENOMEM);
-	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
-				     cpu_to_node(cpu))) {
-		free_cpumask_var(cfd->cpumask);
-		return notifier_from_errno(-ENOMEM);
-	}
 	cfd->csd = alloc_percpu(struct call_single_data);
 	if (!cfd->csd) {
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_cpumask_var(cfd->cpumask);
 		return notifier_from_errno(-ENOMEM);
 	}
@@ -67,7 +55,6 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
-		free_cpumask_var(cfd->cpumask_ipi);
 		free_percpu(cfd->csd);
 		break;
 #endif
@@ -85,12 +72,8 @@ void __init call_function_init(void)
 	void *cpu = (void *)(long)smp_processor_id();
 	int i;
-	for_each_possible_cpu(i) {
-		struct call_single_queue *q = &per_cpu(call_single_queue, i);
-
-		raw_spin_lock_init(&q->lock);
-		INIT_LIST_HEAD(&q->list);
-	}
+	for_each_possible_cpu(i)
+		init_llist_head(&per_cpu(call_single_queue, i));
 	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
 	register_cpu_notifier(&hotplug_cfd_notifier);
@@ -141,18 +124,9 @@ static void csd_unlock(struct call_single_data *csd)
 */
 static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 {
-	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
-	unsigned long flags;
-	int ipi;
-
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
-	raw_spin_lock_irqsave(&dst->lock, flags);
-	ipi = list_empty(&dst->list);
-	list_add_tail(&csd->list, &dst->list);
-	raw_spin_unlock_irqrestore(&dst->lock, flags);
-
 	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
@@ -164,7 +138,7 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
-	if (ipi)
+	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 		arch_send_call_function_single_ipi(cpu);
 	if (wait)
@@ -177,27 +151,26 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
-	LIST_HEAD(list);
+	struct llist_node *entry, *next;
 	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
-	raw_spin_lock(&q->lock);
-	list_replace_init(&q->list, &list);
-	raw_spin_unlock(&q->lock);
+	entry = llist_del_all(&__get_cpu_var(call_single_queue));
+	entry = llist_reverse_order(entry);
-	while (!list_empty(&list)) {
+	while (entry) {
 		struct call_single_data *csd;
-		csd = list_entry(list.next, struct call_single_data, list);
-		list_del(&csd->list);
+		next = entry->next;
+		csd = llist_entry(entry, struct call_single_data, llist);
 		csd->func(csd->info);
-		csd_unlock(csd);
+
+		entry = next;
 	}
 }
@@ -402,30 +375,17 @@ void smp_call_function_many(const struct cpumask *mask,
 	if (unlikely(!cpumask_weight(cfd->cpumask)))
 		return;
-	/*
-	 * After we put an entry into the list, cfd->cpumask may be cleared
-	 * again when another CPU sends another IPI for a SMP function call, so
-	 * cfd->cpumask will be zero.
-	 */
-	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
-
 	for_each_cpu(cpu, cfd->cpumask) {
 		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
-		struct call_single_queue *dst =
-					&per_cpu(call_single_queue, cpu);
-		unsigned long flags;
 		csd_lock(csd);
 		csd->func = func;
 		csd->info = info;
-
-		raw_spin_lock_irqsave(&dst->lock, flags);
-		list_add_tail(&csd->list, &dst->list);
-		raw_spin_unlock_irqrestore(&dst->lock, flags);
+		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 	}
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
+	arch_send_call_function_ipi_mask(cfd->cpumask);
 	if (wait) {
 		for_each_cpu(cpu, cfd->cpumask) {
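The kernel/smp.c change above replaces the spinlock-protected call_single_queue with the kernel's lock-free llist: senders push a csd with llist_add(), whose return value (list was previously empty) decides whether an IPI needs to be sent, and the receiving CPU detaches the whole chain with llist_del_all() and runs llist_reverse_order() to process entries in submission order. The sketch below is a rough userspace analogue of that push / take-all / reverse pattern using C11 atomics; the names (node, pending, push, take_all) are invented and this is not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	int payload;
	struct node *next;
};

static _Atomic(struct node *) pending = NULL;	/* analogue of the per-cpu llist_head */

/* Push one node; returns true if the list was empty (the "send an IPI" case). */
static bool push(struct node *n)
{
	struct node *old = atomic_load(&pending);
	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&pending, &old, n));
	return old == NULL;
}

/* Detach the whole chain at once (newest entry first), like llist_del_all(). */
static struct node *take_all(void)
{
	return atomic_exchange(&pending, NULL);
}

/* Reverse to oldest-first, like llist_reverse_order(). */
static struct node *reverse(struct node *head)
{
	struct node *rev = NULL;
	while (head) {
		struct node *next = head->next;
		head->next = rev;
		rev = head;
		head = next;
	}
	return rev;
}

int main(void)
{
	struct node a = { 1 }, b = { 2 }, c = { 3 };

	printf("need IPI: %d\n", push(&a));	/* 1: list was empty */
	printf("need IPI: %d\n", push(&b));	/* 0 */
	printf("need IPI: %d\n", push(&c));	/* 0 */

	for (struct node *n = reverse(take_all()); n; n = n->next)
		printf("run %d\n", n->payload);	/* 1, 2, 3 in submission order */
	return 0;
}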
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 850967068aaf..490fcbb1dc5b 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,7 +328,7 @@ void irq_enter(void)
	 * here, as softirq will be serviced on return from interrupt.
	 */
 	local_bh_disable();
-	tick_check_idle();
+	tick_irq_enter();
 	_local_bh_enable();
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 096db7452cbd..49e13e1f8fe6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -121,6 +121,8 @@ extern int blk_iopoll_enabled;
 static int sixty = 60;
 #endif
+static int __maybe_unused neg_one = -1;
+
 static int zero;
 static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
@@ -997,9 +999,10 @@ static struct ctl_table kern_table[] = {
 	{
 		.procname = "hung_task_warnings",
 		.data = &sysctl_hung_task_warnings,
-		.maxlen = sizeof(unsigned long),
+		.maxlen = sizeof(int),
 		.mode = 0644,
-		.proc_handler = proc_doulongvec_minmax,
+		.proc_handler = proc_dointvec_minmax,
+		.extra1 = &neg_one,
 	},
 #endif
 #ifdef CONFIG_COMPAT
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 08cb0c3b8ccb..9f8af69c67ec 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -533,12 +533,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
 	u64 time_delta;
+	time_delta = timekeeping_max_deferment();
+
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
 		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
-		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
@@ -678,18 +679,18 @@ out:
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
 {
 #ifdef CONFIG_NO_HZ_FULL
-	int cpu = smp_processor_id();
+	int cpu = smp_processor_id();
-	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
-		return;
+	if (!tick_nohz_full_cpu(cpu) || is_idle_task(current))
+		return;
-	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
-		return;
+	if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
+		return;
-	if (!can_stop_full_tick())
-		return;
+	if (!can_stop_full_tick())
+		return;
-	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
+	tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
 #endif
 }
@@ -1023,7 +1024,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 #endif
 }
-static inline void tick_check_nohz_this_cpu(void)
+static inline void tick_nohz_irq_enter(void)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	ktime_t now;
@@ -1042,17 +1043,17 @@ static inline void tick_check_nohz_this_cpu(void)
 #else
 static inline void tick_nohz_switch_to_nohz(void) { }
-static inline void tick_check_nohz_this_cpu(void) { }
+static inline void tick_nohz_irq_enter(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 /*
  * Called from irq_enter to notify about the possible interruption of idle()
  */
-void tick_check_idle(void)
+void tick_irq_enter(void)
 {
 	tick_check_oneshot_broadcast_this_cpu();
-	tick_check_nohz_this_cpu();
+	tick_nohz_irq_enter();
 }
 /*
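The first kernel/time/tick-sched.c hunk hoists timekeeping_max_deferment() out of the jiffies_lock read loop: that call is not protected by the seqlock, so repeating it on every read retry was pure overhead. The toy sketch below illustrates the read-side idiom of keeping only the protected loads inside the retry loop; the atomic counter is a stand-in rather than the kernel's seqlock API, and all names are invented.

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-ins for the seqlock-protected tick bookkeeping. */
static atomic_uint jiffies_seq;			/* bumped by a (hypothetical) writer */
static unsigned long jiffies_now = 1000;
static unsigned long last_update = 990;

static unsigned long expensive_unrelated_call(void)
{
	/* Stand-in for timekeeping_max_deferment(): not protected by the
	 * seqlock, so redoing it on a reader retry buys nothing. */
	return 4000000000UL;
}

int main(void)
{
	unsigned int seq;
	unsigned long snap_jiffies, snap_update, time_delta;

	/* Hoisted out of the retry loop, as in the tick-sched hunk above. */
	time_delta = expensive_unrelated_call();

	do {
		seq = atomic_load(&jiffies_seq);	/* like read_seqbegin()  */
		snap_update = last_update;		/* only the protected    */
		snap_jiffies = jiffies_now;		/* reads stay inside     */
	} while (atomic_load(&jiffies_seq) != seq);	/* like read_seqretry(); a
							 * real seqlock also retries
							 * while the count is odd */

	printf("delta=%lu jiffies=%lu last_update=%lu\n",
	       time_delta, snap_jiffies, snap_update);
	return 0;
}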