author    Vincent Chen <vincentc@andestech.com>    2019-01-03 11:32:33 +0800
committer Palmer Dabbelt <palmer@sifive.com>       2019-01-23 12:56:19 -0800
commit    99fd6e875d0c24448a5e2c241422a691be46b241
tree      4b253b38db1d5a3653ef38fd01d3fb0599c877de /arch/riscv
parent    49a57857aeea06ca831043acbb0fa5e0f50602fd
RISC-V: Add _TIF_NEED_RESCHED check for kernel thread when CONFIG_PREEMPT=y
cond_resched() can be used to yield the CPU only when CONFIG_PREEMPT is not defined; with CONFIG_PREEMPT=y it is a no-op. To keep a kernel thread from occupying a CPU indefinitely when CONFIG_PREEMPT=y, the kernel thread must follow the same rescheduling mechanism as a user thread: check _TIF_NEED_RESCHED on the return-to-kernel path and preempt when the flag is set.

Signed-off-by: Vincent Chen <vincentc@andestech.com>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Palmer Dabbelt <palmer@sifive.com>
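For readers less familiar with the entry.S flow, the new resume_kernel path below is roughly equivalent to the following C sketch. This is an illustration only, not part of the patch; the real logic is assembly that runs with interrupts disabled when returning from an exception taken in kernel mode, and the function name is made up here:

	/* Sketch of the resume_kernel logic added to entry.S (CONFIG_PREEMPT=y). */
	static void resume_kernel_sketch(struct thread_info *ti)
	{
		/* Preemption disabled: fall through to restore_all unchanged. */
		if (ti->preempt_count)
			return;

		/* Otherwise reschedule for as long as NEED_RESCHED stays set. */
		while (ti->flags & _TIF_NEED_RESCHED)
			preempt_schedule_irq();
	}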
Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/kernel/asm-offsets.c |  1 +
-rw-r--r--  arch/riscv/kernel/entry.S       | 18 +++++++++++++++++-
2 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 6a92a2fe198e..dac98348c6a3 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ void asm_offsets(void)
OFFSET(TASK_STACK, task_struct, stack);
OFFSET(TASK_TI, task_struct, thread_info);
OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+ OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 355166f57205..fd9b57c8b4ce 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -144,6 +144,10 @@ _save_context:
REG_L x2, PT_SP(sp)
.endm
+#if !IS_ENABLED(CONFIG_PREEMPT)
+.set resume_kernel, restore_all
+#endif
+
ENTRY(handle_exception)
SAVE_ALL
@@ -228,7 +232,7 @@ ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
csrc sstatus, SR_SIE
andi s0, s0, SR_SPP
- bnez s0, restore_all
+ bnez s0, resume_kernel
resume_userspace:
/* Interrupts must be disabled here so flags are checked atomically */
@@ -250,6 +254,18 @@ restore_all:
RESTORE_ALL
sret
+#if IS_ENABLED(CONFIG_PREEMPT)
+resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+need_resched:
+ REG_L s0, TASK_TI_FLAGS(tp)
+ andi s0, s0, _TIF_NEED_RESCHED
+ beqz s0, restore_all
+ call preempt_schedule_irq
+ j need_resched
+#endif
+
work_pending:
/* Enter slow path for supplementary processing */
la ra, ret_from_exception
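As background on the asm-offsets.c hunk above: assembly cannot evaluate offsetof(), so asm-offsets.c is compiled at build time and each OFFSET() entry is turned into an assembler constant such as TASK_TI_PREEMPT_COUNT, which entry.S then uses as a load offset relative to tp. A standalone sketch of the idea, using a hypothetical stand-in struct rather than the real task_struct/thread_info or the kernel build machinery:

	#include <stddef.h>
	#include <stdio.h>

	/* Stand-in for the fields entry.S needs; not the real thread_info. */
	struct thread_info_sketch {
		unsigned long flags;
		int preempt_count;
	};

	int main(void)
	{
		/* The kernel's OFFSET() macro exports a constant like this so
		 * entry.S can do: REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
		 */
		printf("preempt_count offset: %zu\n",
		       offsetof(struct thread_info_sketch, preempt_count));
		return 0;
	}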