From afa1b55dfb1a9d9c8d4158ca0625d200fa7c3b3b Mon Sep 17 00:00:00 2001
From: Max Krummenacher
Date: Thu, 19 Dec 2019 11:21:52 +0100
Subject: [PATCH 1/5] sources: prepare for rt patch

patch-4.14.155-rt70.patch does not apply to the 4.14.159 downstream
kernel. Change the sources so that the rt patch applies cleanly, and
revert the change again afterwards.

Signed-off-by: Max Krummenacher
---
 arch/arm/mach-imx/cpuidle-imx6q.c | 30 ++++++++++--------------------
 fs/nfs/delegation.c               |  2 +-
 2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 02d55ae7e0eb..2a2129c1cdb8 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -16,34 +16,24 @@
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
 			    struct cpuidle_driver *drv, int index)
 {
-	if (atomic_inc_return(&master) == num_online_cpus()) {
-		/*
-		 * With this lock, we prevent other cpu to exit and enter
-		 * this function again and become the master.
-		 */
-		if (!spin_trylock(&master_lock))
-			goto idle;
+	spin_lock(&cpuidle_lock);
+	if (++num_idle_cpus == num_online_cpus())
 		imx6_set_lpm(WAIT_UNCLOCKED);
-		if (atomic_read(&master) != num_online_cpus())
-			imx6_set_lpm(WAIT_CLOCKED);
-		cpu_do_idle();
-		imx6_set_lpm(WAIT_CLOCKED);
-		spin_unlock(&master_lock);
-		goto done;
-	}
+	spin_unlock(&cpuidle_lock);
 
-idle:
 	cpu_do_idle();
-done:
-	atomic_dec(&master);
-	imx6_set_lpm(WAIT_CLOCKED);
+	spin_lock(&cpuidle_lock);
+	if (num_idle_cpus-- == num_online_cpus())
+		imx6_set_lpm(WAIT_CLOCKED);
+	spin_unlock(&cpuidle_lock);
+
 	return index;
 }
 
 static struct cpuidle_driver imx6q_cpuidle_driver = {

diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 04d57e11577e..7b59dbdd83a4 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -163,7 +163,7 @@ static int nfs_delegation_claim_opens(struct inode *inode,
 		seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
 		err = nfs4_open_delegation_recall(ctx, state, stateid, type);
 		if (!err)
-			err = nfs_delegation_claim_locks(state, stateid);
+			err = nfs_delegation_claim_locks(ctx, state, stateid);
 		if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 			err = -EAGAIN;
 		mutex_unlock(&sp->so_delegreturn_mutex);
-- 
2.20.1
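
Note on the cpuidle hunk: it replaces the atomic_t "master" bookkeeping
with a plain count of idle CPUs serialized by a spinlock. The last CPU
to enter idle switches the SoC to WAIT_UNCLOCKED, and the first CPU to
wake switches it back to WAIT_CLOCKED; this is the form the rt patch
expects as context. Below is a minimal user-space sketch of that
counting scheme, not kernel code: a pthread mutex stands in for the
spinlock, an enum plus printf model the imx6_set_lpm() calls, and the
thread count and sleep duration are invented for the demonstration.

/* Build with: gcc -pthread -o idle_sketch idle_sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

enum lpm_mode { WAIT_CLOCKED, WAIT_UNCLOCKED };

static enum lpm_mode lpm = WAIT_CLOCKED;
static int num_idle_cpus;
static pthread_mutex_t cpuidle_lock = PTHREAD_MUTEX_INITIALIZER;

static void *enter_wait(void *arg)
{
	long cpu = (long)arg;

	/* Last CPU to go idle drops the "SoC" into the deeper state. */
	pthread_mutex_lock(&cpuidle_lock);
	if (++num_idle_cpus == NCPUS) {
		lpm = WAIT_UNCLOCKED;
		printf("cpu%ld: all idle -> WAIT_UNCLOCKED\n", cpu);
	}
	pthread_mutex_unlock(&cpuidle_lock);

	usleep(1000);		/* stands in for cpu_do_idle() */

	/* First CPU to wake restores the clocked state. */
	pthread_mutex_lock(&cpuidle_lock);
	if (num_idle_cpus-- == NCPUS) {
		lpm = WAIT_CLOCKED;
		printf("cpu%ld: first awake -> WAIT_CLOCKED\n", cpu);
	}
	pthread_mutex_unlock(&cpuidle_lock);
	return NULL;
}

int main(void)
{
	pthread_t t[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&t[i], NULL, enter_wait, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(t[i], NULL);
	printf("final mode: %s\n",
	       lpm == WAIT_CLOCKED ? "WAIT_CLOCKED" : "WAIT_UNCLOCKED");
	return 0;
}

The post-decrement comparison mirrors the kernel hunk: only the waker
that still observes the full idle count restores the clocked state, so
exactly one transition happens in each direction per idle period.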