diff options
| field     |                                                  |                           |
|-----------|--------------------------------------------------|---------------------------|
| author    | Hiroshi Doyu &lt;hdoyu@nvidia.com&gt;            | 2013-09-06 13:26:09 +0300 |
| committer | Dan Willemsen &lt;dwillemsen@nvidia.com&gt;      | 2013-09-16 17:41:20 -0700 |
| commit    | 413820dee9865421530c7523170baa00251790ba (patch) |                           |
| tree      | f46946c7050d42aa535c068958316c3ce87c34c2 /drivers/iommu |                    |
| parent    | 07738ff204574a94312f73e0aca91c1bc0446f9a (diff)  |                           |
Revert "iommu/tegra: smmu: fix perf regression with map_sg"
This reverts commit da57b0c27246871c93f5e541ba8803de95c311bf.
No performance improvement was observed, and it is better to have smaller preemption latency.
Bug 1290869
Change-Id: I368381c82f42ef0baf9cdd573f97ea9e9724923a
Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Reviewed-on: http://git-master/r/271446
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/tegra-smmu.c | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 6efe1bb0b183..4ec354968df4 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -1196,7 +1196,6 @@ out:
 static int smmu_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 			     struct scatterlist *sgl, int nents, int prot)
 {
-	unsigned long flags;
 	unsigned int count;
 	struct scatterlist *s;
 	int err = 0;
@@ -1211,10 +1210,12 @@ static int smmu_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	else if (dma_get_attr(DMA_ATTR_WRITE_ONLY, (struct dma_attrs *)prot))
 		attrs &= ~_READABLE;
 
-	spin_lock_irqsave(&as->lock, flags);
 	for (count = 0, s = sgl; count < nents; s = sg_next(s)) {
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
+		unsigned long flags;
+
+		spin_lock_irqsave(&as->lock, flags);
 
 		while (len) {
 			int pfn = __phys_to_pfn(phys);
@@ -1267,13 +1268,13 @@ skip:
 
 			count += num;
 		}
+		spin_unlock_irqrestore(&as->lock, flags);
 	}
 
 	if (flush_all)
 		flush_ptc_and_tlb_as(as, iova_base,
 				     iova_base + nents * PAGE_SIZE);
 
-	spin_unlock_irqrestore(&as->lock, flags);
 	return err;
 }