From 4d9a2c8746671efbb0c27d3ae28c7474597a7aad Mon Sep 17 00:00:00 2001
From: Jan Kara
Date: Thu, 12 May 2016 18:29:20 +0200
Subject: dax: Remove i_mmap_lock protection

Currently faults are protected against truncate by the filesystem-specific
i_mmap_sem, and by the page lock in the case of a hole page. COW faults are
protected by DAX radix tree entry locking. So there's no need for
i_mmap_lock in DAX code. Remove it.

Reviewed-by: Ross Zwisler
Signed-off-by: Jan Kara
Signed-off-by: Ross Zwisler
---
 fs/dax.c | 24 +++++-------------------
 1 file changed, 5 insertions(+), 19 deletions(-)

(limited to 'fs/dax.c')

diff --git a/fs/dax.c b/fs/dax.c
index be74635e05a6..6dbe6021cab7 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -798,29 +798,19 @@ static int dax_insert_mapping(struct address_space *mapping,
 		.sector = to_sector(bh, mapping->host),
 		.size = bh->b_size,
 	};
-	int error;
 	void *ret;
 	void *entry = *entryp;

-	i_mmap_lock_read(mapping);
-
-	if (dax_map_atomic(bdev, &dax) < 0) {
-		error = PTR_ERR(dax.addr);
-		goto out;
-	}
+	if (dax_map_atomic(bdev, &dax) < 0)
+		return PTR_ERR(dax.addr);
 	dax_unmap_atomic(bdev, &dax);

 	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
-	if (IS_ERR(ret)) {
-		error = PTR_ERR(ret);
-		goto out;
-	}
+	if (IS_ERR(ret))
+		return PTR_ERR(ret);
 	*entryp = ret;

-	error = vm_insert_mixed(vma, vaddr, dax.pfn);
- out:
-	i_mmap_unlock_read(mapping);
-	return error;
+	return vm_insert_mixed(vma, vaddr, dax.pfn);
 }

 /**
@@ -1058,8 +1048,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		truncate_pagecache_range(inode, lstart, lend);
 	}

-	i_mmap_lock_read(mapping);
-
 	if (!write && !buffer_mapped(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -1148,8 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	}

  out:
-	i_mmap_unlock_read(mapping);
-
 	return result;

 fallback:
--
cgit v1.2.3
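
For reference, below is a sketch of how dax_insert_mapping() reads once the first
hunk is applied, reconstructed from the diff above. Only the body between the shown
context lines comes from the patch; the parameter list after the first line and the
vaddr/bdev local declarations are not visible in the hunk and are assumptions here,
not something this commit establishes.

/*
 * Sketch of dax_insert_mapping() after this patch, reconstructed from the
 * hunk above.  The parameters following "struct address_space *mapping" and
 * the vaddr/bdev declarations are assumed for illustration; they are not
 * part of the hunk context.
 */
static int dax_insert_mapping(struct address_space *mapping,
		struct buffer_head *bh, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, mapping->host),
		.size = bh->b_size,
	};
	void *ret;
	void *entry = *entryp;

	/*
	 * No i_mmap_lock_read()/i_mmap_unlock_read() pair any more: per the
	 * commit message, the fault is already serialized against truncate
	 * by the filesystem's own lock and by the locked DAX radix tree
	 * entry passed in via *entryp.
	 */
	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	/* Errors return directly; the old "out:" unlock label is gone. */
	return vm_insert_mixed(vma, vaddr, dax.pfn);
}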