diff options
author | Hiro Sugawara <hsugawara@nvidia.com> | 2011-08-24 16:37:12 -0700 |
---|---|---|
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2011-11-30 21:48:40 -0800 |
commit | 9f71d8dffae5e61fd7bb298c5d561fa80163afb1 (patch) | |
tree | 3c5e3bef7046c34cb40e378bd96cbdabe27140f7 | |
parent | bceb821e47648ff7e63be15af9373a8c3ad11852 (diff) |
arm: tegra: iovmm: Change spinlock to mutex in IOVMM
Bug 862658
A spinlock is not only overkill for protecting this read-only list, but it
also breaks the atomicity requirements of the lower layer (which may sleep
while the lock is held).
Change-Id: I27688c9890f888068f4ffc7b9d22eacb19c946e8
Reviewed-on: http://git-master/r/49092
Reviewed-by: Daniel Willemsen <dwillemsen@nvidia.com>
Tested-by: Daniel Willemsen <dwillemsen@nvidia.com>
Rebase-Id: R251ec43706e88ee64e201b386fe4b35cbe41f1ba
-rw-r--r-- | arch/arm/mach-tegra/iovmm.c | 16 |
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
index 1e91d8071678..e792443dc377 100644
--- a/arch/arm/mach-tegra/iovmm.c
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -75,7 +75,7 @@ struct iovmm_share_group {
 static LIST_HEAD(iovmm_devices);
 static LIST_HEAD(iovmm_groups);
 static DEFINE_MUTEX(iovmm_group_list_lock);
-static DEFINE_SPINLOCK(iovmm_device_list_lock);
+static DEFINE_MUTEX(iovmm_device_list_lock);
 static struct kmem_cache *iovmm_cache;
 
 static tegra_iovmm_addr_t iovmm_align_up(struct tegra_iovmm_device *dev,
@@ -814,12 +814,12 @@ struct tegra_iovmm_client *tegra_iovmm_alloc_client(const char *name,
 		kfree(grp);
 		goto fail_lock;
 	}
-	spin_lock_irqsave(&iovmm_device_list_lock, flags);
+	mutex_lock(&iovmm_device_list_lock);
 	list_for_each_entry(dev, &iovmm_devices, list) {
 		grp->domain = dev->ops->alloc_domain(dev, c);
 		if (grp->domain)
 			break;
 	}
-	spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
+	mutex_unlock(&iovmm_device_list_lock);
 	if (!grp->domain) {
 		pr_err("%s: alloc_domain failed for %s\n",
 			__func__, c->name);
@@ -887,7 +887,7 @@ static int tegra_iovmm_suspend(void)
 	struct tegra_iovmm_device *dev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iovmm_device_list_lock, flags);
+	mutex_lock(&iovmm_device_list_lock);
 	list_for_each_entry(dev, &iovmm_devices, list) {
 		if (!dev->ops->suspend)
@@ -897,11 +897,11 @@ static int tegra_iovmm_suspend(void)
 		if (rc) {
 			pr_err("%s: %s suspend returned %d\n",
 				__func__, dev->name, rc);
-			spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
+			mutex_unlock(&iovmm_device_list_lock);
 			return rc;
 		}
 	}
-	spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
+	mutex_unlock(&iovmm_device_list_lock);
 	return 0;
 }
 
@@ -910,14 +910,14 @@ static void tegra_iovmm_resume(void)
 	struct tegra_iovmm_device *dev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&iovmm_device_list_lock, flags);
+	mutex_lock(&iovmm_device_list_lock);
 	list_for_each_entry(dev, &iovmm_devices, list) {
 		if (dev->ops->resume)
 			dev->ops->resume(dev);
 	}
-	spin_unlock_irqrestore(&iovmm_device_list_lock, flags);
+	mutex_unlock(&iovmm_device_list_lock);
 }
 
 static struct syscore_ops tegra_iovmm_syscore_ops = {