path: root/fs/erofs/zdata.c
author		Gao Xiang <gaoxiang25@huawei.com>	2019-09-04 10:09:05 +0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-09-05 20:10:08 +0200
commit		99634bf388db04048b83a075358a1d166e7300fb (patch)
tree		101429479b254d807a2e762fa2a4851a490e1b4c /fs/erofs/zdata.c
parent		94e4e153b1c25a49b4953c424e6e2f66efb449f3 (diff)
erofs: add "erofs_" prefix for common and short functions
Add erofs_ prefix to free_inode, alloc_inode, ...

Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Link: https://lore.kernel.org/r/20190904020912.63925-19-gaoxiang25@huawei.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
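The patch only renames symbols. As an illustration of the convention (a sketch, not code taken from the patch), a file-local helper such as free_inode from the message above simply gains the subsystem prefix while keeping its signature and behaviour:

/* before: short, generic name local to fs/erofs */
static void free_inode(struct inode *inode);

/* after: prefixed, so the symbol stays unambiguous in backtraces,
 * perf output and global symbol listings
 */
static void erofs_free_inode(struct inode *inode);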
Diffstat (limited to 'fs/erofs/zdata.c')
-rw-r--r--	fs/erofs/zdata.c	19
1 file changed, 10 insertions(+), 9 deletions(-)
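The hunks below rename the decompression workqueue and pcluster slab helpers in fs/erofs/zdata.c. For context, a minimal sketch of what z_erofs_init_workqueue() plausibly does, reconstructed from the variables and flags visible in the first two hunks; the "erofs_unzipd" queue name and the max_active limit are assumptions, not part of this diff:

static struct workqueue_struct *z_erofs_workqueue;

static inline int z_erofs_init_workqueue(void)
{
	const unsigned int onlinecpus = num_possible_cpus();
	const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;

	/* queue name and concurrency limit are assumed values here */
	z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, onlinecpus);

	return z_erofs_workqueue ? 0 : -ENOMEM;
}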
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 3010fa3d1ac3..8587d6751c48 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -40,7 +40,7 @@ void z_erofs_exit_zip_subsystem(void)
kmem_cache_destroy(pcluster_cachep);
}
-static inline int init_unzip_workqueue(void)
+static inline int z_erofs_init_workqueue(void)
{
const unsigned int onlinecpus = num_possible_cpus();
const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
@@ -54,7 +54,7 @@ static inline int init_unzip_workqueue(void)
return z_erofs_workqueue ? 0 : -ENOMEM;
}
-static void init_once(void *ptr)
+static void z_erofs_pcluster_init_once(void *ptr)
{
struct z_erofs_pcluster *pcl = ptr;
struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
@@ -67,7 +67,7 @@ static void init_once(void *ptr)
pcl->compressed_pages[i] = NULL;
}
-static void init_always(struct z_erofs_pcluster *pcl)
+static void z_erofs_pcluster_init_always(struct z_erofs_pcluster *pcl)
{
struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
@@ -81,9 +81,10 @@ int __init z_erofs_init_zip_subsystem(void)
{
pcluster_cachep = kmem_cache_create("erofs_compress",
Z_EROFS_WORKGROUP_SIZE, 0,
- SLAB_RECLAIM_ACCOUNT, init_once);
+ SLAB_RECLAIM_ACCOUNT,
+ z_erofs_pcluster_init_once);
if (pcluster_cachep) {
- if (!init_unzip_workqueue())
+ if (!z_erofs_init_workqueue())
return 0;
kmem_cache_destroy(pcluster_cachep);
@@ -272,8 +273,8 @@ int erofs_try_to_free_cached_page(struct address_space *mapping,
}
/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
-static inline bool try_inplace_io(struct z_erofs_collector *clt,
- struct page *page)
+static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+ struct page *page)
{
struct z_erofs_pcluster *const pcl = clt->pcl;
const unsigned int clusterpages = BIT(pcl->clusterbits);
@@ -296,7 +297,7 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
/* give priority for inplaceio */
if (clt->mode >= COLLECT_PRIMARY &&
type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
- try_inplace_io(clt, page))
+ z_erofs_try_inplace_io(clt, page))
return 0;
ret = z_erofs_pagevec_enqueue(&clt->vector,
@@ -409,7 +410,7 @@ static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
if (!pcl)
return ERR_PTR(-ENOMEM);
- init_always(pcl);
+ z_erofs_pcluster_init_always(pcl);
pcl->obj.index = map->m_pa >> PAGE_SHIFT;
pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |