author     Hiroshi DOYU <hdoyu@nvidia.com>          2011-11-21 14:22:37 +0200
committer  Simone Willett <swillett@nvidia.com>     2012-02-09 12:54:35 -0800
commit     5fdce107e444534b07c1f57f119d5cb1388e46d6 (patch)
tree       d39edac3d4aa59bedbb5ee085741d1d22657c5dd /arch/arm/mach-tegra/iovmm.c
parent     18c630a3a03b83596c93612250edb4970f8a8b14 (diff)
ARM: tegra: iovmm: Use all capital letters for MACRO
Use all capital letters for MACRO

Signed-off-by: Hiroshi DOYU <hdoyu@nvidia.com>
Reviewed-on: http://git-master/r/66371
(cherry picked from commit 48100a354bf4ec447c1b0eefa968322894907bca)
Change-Id: I3429b68bcc9228a4b74b50c95156f8df1c1c19a0
Signed-off-by: Pritesh Raithatha <praithatha@nvidia.com>
Reviewed-on: http://git-master/r/79990
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Tested-by: Bharat Nihalani <bnihalani@nvidia.com>
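The change is purely a rename: flag-bit indices such as BK_free, CL_locked and DM_map_dirty become BK_FREE, CL_LOCKED and DM_MAP_DIRTY, following the kernel convention that macros defining constants are spelled in capitals. The short sketch below is illustrative only (the struct and helper names are made up, not taken from iovmm.c); it shows the pattern these flags participate in, with the upper-case bit indices feeding the standard <linux/bitops.h> helpers that appear throughout the diff:

/*
 * Illustrative only -- not part of iovmm.c.  Bit-index macros are
 * upper-case compile-time constants; the flags word is an unsigned
 * long operated on with the atomic bitops helpers.
 */
#include <linux/bitops.h>
#include <linux/types.h>

#define EX_FREE		0	/* hypothetical: block sits on the free list */
#define EX_MAP_DIRTY	1	/* hypothetical: mapping deferred until locked */

struct ex_block {
	unsigned long flags;
};

static void ex_mark_free(struct ex_block *b)
{
	set_bit(EX_FREE, &b->flags);		/* atomically set bit 0 */
}

static bool ex_needs_map(struct ex_block *b)
{
	/* returns the old value and clears the bit in one atomic step */
	return test_and_clear_bit(EX_MAP_DIRTY, &b->flags);
}

The test_and_clear_bit() form is the same idiom the driver uses on BK_MAP_DIRTY in _iovmm_client_lock() below to defer mapping until a client first locks its domain.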
Diffstat (limited to 'arch/arm/mach-tegra/iovmm.c')
-rw-r--r--  arch/arm/mach-tegra/iovmm.c  59
1 file changed, 30 insertions(+), 29 deletions(-)
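Besides the renames, one hunk in the diff below only reflows the page-alignment assertion in tegra_iovmm_vm_insert_pfn(); the check itself is the usual power-of-two mask test: an address is aligned to a 2^pgsize_bits boundary exactly when its low pgsize_bits bits are all zero. A small self-contained userspace illustration, assuming a hypothetical 4 KiB page size:

/* Illustrative only: the mask trick behind BUG_ON(vaddr & ((1 << bits) - 1)). */
#include <assert.h>
#include <stdint.h>

#define PGSIZE_BITS 12			/* assumed: 4 KiB pages */

static int is_page_aligned(uintptr_t vaddr)
{
	/* (1UL << 12) - 1 == 0xfff: any set low bit means misalignment */
	return (vaddr & ((1UL << PGSIZE_BITS) - 1)) == 0;
}

int main(void)
{
	assert(is_page_aligned(0x20000000));	/* multiple of 4096 */
	assert(!is_page_aligned(0x20000004));	/* low bits set, not aligned */
	return 0;
}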
diff --git a/arch/arm/mach-tegra/iovmm.c b/arch/arm/mach-tegra/iovmm.c
index 784a49ad3c66..6112128cb743 100644
--- a/arch/arm/mach-tegra/iovmm.c
+++ b/arch/arm/mach-tegra/iovmm.c
@@ -45,14 +45,14 @@
#define iovmm_end(_b) (iovmm_start(_b) + iovmm_length(_b))
/* flags for the block */
-#define BK_free 0 /* indicates free mappings */
-#define BK_map_dirty 1 /* used by demand-loaded mappings */
+#define BK_FREE 0 /* indicates free mappings */
+#define BK_MAP_DIRTY 1 /* used by demand-loaded mappings */
/* flags for the client */
-#define CL_locked 0
+#define CL_LOCKED 0
/* flags for the domain */
-#define DM_map_dirty 0
+#define DM_MAP_DIRTY 0
struct tegra_iovmm_block {
struct tegra_iovmm_area vm_area;
@@ -92,7 +92,7 @@ size_t tegra_iovmm_get_max_free(struct tegra_iovmm_client *client)
while (n) {
b = rb_entry(n, struct tegra_iovmm_block, all_node);
n = rb_next(n);
- if (test_bit(BK_free, &b->flags)) {
+ if (test_bit(BK_FREE, &b->flags)) {
max_free = max_t(tegra_iovmm_addr_t,
max_free, iovmm_length(b));
}
@@ -122,7 +122,7 @@ static void tegra_iovmm_block_stats(struct tegra_iovmm_domain *domain,
n = rb_next(n);
(*num_blocks)++;
*total += b->length;
- if (test_bit(BK_free, &b->flags)) {
+ if (test_bit(BK_FREE, &b->flags)) {
(*num_free)++;
*total_free += b->length;
*max_free = max_t(size_t, *max_free, b->length);
@@ -197,9 +197,9 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
succ = rb_entry(temp, struct tegra_iovmm_block, all_node);
if (pred)
- pred_free = test_bit(BK_free, &pred->flags);
+ pred_free = test_bit(BK_FREE, &pred->flags);
if (succ)
- succ_free = test_bit(BK_free, &succ->flags);
+ succ_free = test_bit(BK_FREE, &succ->flags);
if (pred_free && succ_free) {
pred->length += block->length;
@@ -236,7 +236,7 @@ static void iovmm_free_block(struct tegra_iovmm_domain *domain,
}
rb_link_node(&block->free_node, parent, p);
rb_insert_color(&block->free_node, &domain->free_blocks);
- set_bit(BK_free, &block->flags);
+ set_bit(BK_FREE, &block->flags);
spin_unlock(&domain->block_lock);
}
@@ -275,7 +275,7 @@ static struct tegra_iovmm_block *iovmm_split_free_block(
else
p = &parent->rb_left;
}
- set_bit(BK_free, &rem->flags);
+ set_bit(BK_FREE, &rem->flags);
rb_link_node(&rem->free_node, parent, p);
rb_insert_color(&rem->free_node, &domain->free_blocks);
@@ -357,7 +357,7 @@ static struct tegra_iovmm_block *iovmm_alloc_block(
/* Unfree designed block */
rb_erase(&best->free_node, &domain->free_blocks);
- clear_bit(BK_free, &best->flags);
+ clear_bit(BK_FREE, &best->flags);
atomic_inc(&best->ref);
iovmm_start(best) = best->start + simalign;
@@ -424,7 +424,7 @@ static struct tegra_iovmm_block *iovmm_allocate_vm(
/* remove the desired block from free list. */
rb_erase(&best->free_node, &domain->free_blocks);
- clear_bit(BK_free, &best->flags);
+ clear_bit(BK_FREE, &best->flags);
atomic_inc(&best->ref);
iovmm_start(best) = iovm_start;
@@ -468,7 +468,7 @@ int tegra_iovmm_domain_init(struct tegra_iovmm_domain *domain,
b->start = round_up(start, page_size);
b->length = round_down(end, page_size) - b->start;
- set_bit(BK_free, &b->flags);
+ set_bit(BK_FREE, &b->flags);
rb_link_node(&b->free_node, NULL, &domain->free_blocks.rb_node);
rb_insert_color(&b->free_node, &domain->free_blocks);
rb_link_node(&b->all_node, NULL, &domain->all_blocks.rb_node);
@@ -505,9 +505,9 @@ struct tegra_iovmm_area *tegra_iovmm_create_vm(
b->vm_area.ops = ops;
down_read(&b->vm_area.domain->map_lock);
- if (ops && !test_bit(CL_locked, &client->flags)) {
- set_bit(BK_map_dirty, &b->flags);
- set_bit(DM_map_dirty, &client->domain->flags);
+ if (ops && !test_bit(CL_LOCKED, &client->flags)) {
+ set_bit(BK_MAP_DIRTY, &b->flags);
+ set_bit(DM_MAP_DIRTY, &client->domain->flags);
} else if (ops) {
if (domain->dev->ops->map(domain, &b->vm_area))
pr_err("%s failed to map locked domain\n", __func__);
@@ -521,7 +521,8 @@ void tegra_iovmm_vm_insert_pfn(struct tegra_iovmm_area *vm,
tegra_iovmm_addr_t vaddr, unsigned long pfn)
{
struct tegra_iovmm_domain *domain = vm->domain;
- BUG_ON(vaddr & ((1<<domain->dev->pgsize_bits)-1));
+
+ BUG_ON(vaddr & ((1 << domain->dev->pgsize_bits) - 1));
BUG_ON(vaddr >= vm->iovm_start + vm->iovm_length);
BUG_ON(vaddr < vm->iovm_start);
BUG_ON(vm->ops);
@@ -541,7 +542,7 @@ void tegra_iovmm_zap_vm(struct tegra_iovmm_area *vm)
* the memory for the page tables it uses may not be allocated
*/
down_read(&domain->map_lock);
- if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+ if (!test_and_clear_bit(BK_MAP_DIRTY, &b->flags))
domain->dev->ops->unmap(domain, vm, false);
up_read(&domain->map_lock);
}
@@ -561,8 +562,8 @@ void tegra_iovmm_unzap_vm(struct tegra_iovmm_area *vm)
if (atomic_read(&domain->locks))
domain->dev->ops->map(domain, vm);
else {
- set_bit(BK_map_dirty, &b->flags);
- set_bit(DM_map_dirty, &domain->flags);
+ set_bit(BK_MAP_DIRTY, &b->flags);
+ set_bit(DM_MAP_DIRTY, &domain->flags);
}
}
up_read(&domain->map_lock);
@@ -579,7 +580,7 @@ void tegra_iovmm_free_vm(struct tegra_iovmm_area *vm)
b = container_of(vm, struct tegra_iovmm_block, vm_area);
domain = vm->domain;
down_read(&domain->map_lock);
- if (!test_and_clear_bit(BK_map_dirty, &b->flags))
+ if (!test_and_clear_bit(BK_MAP_DIRTY, &b->flags))
domain->dev->ops->unmap(domain, vm, true);
iovmm_free_block(domain, b);
up_read(&domain->map_lock);
@@ -619,7 +620,7 @@ struct tegra_iovmm_area *tegra_iovmm_find_area_get(
while (n) {
b = rb_entry(n, struct tegra_iovmm_block, all_node);
if (iovmm_start(b) <= addr && addr <= iovmm_end(b)) {
- if (test_bit(BK_free, &b->flags))
+ if (test_bit(BK_FREE, &b->flags))
b = NULL;
break;
}
@@ -646,7 +647,7 @@ static int _iovmm_client_lock(struct tegra_iovmm_client *client)
if (unlikely(!client))
return -ENODEV;
- if (unlikely(test_bit(CL_locked, &client->flags))) {
+ if (unlikely(test_bit(CL_LOCKED, &client->flags))) {
pr_err("attempting to relock client %s\n", client->name);
return 0;
}
@@ -666,7 +667,7 @@ static int _iovmm_client_lock(struct tegra_iovmm_client *client)
return -EAGAIN;
}
}
- if (test_and_clear_bit(DM_map_dirty, &domain->flags)) {
+ if (test_and_clear_bit(DM_MAP_DIRTY, &domain->flags)) {
struct rb_node *n;
struct tegra_iovmm_block *b;
@@ -675,10 +676,10 @@ static int _iovmm_client_lock(struct tegra_iovmm_client *client)
while (n) {
b = rb_entry(n, struct tegra_iovmm_block, all_node);
n = rb_next(n);
- if (test_bit(BK_free, &b->flags))
+ if (test_bit(BK_FREE, &b->flags))
continue;
- if (test_and_clear_bit(BK_map_dirty, &b->flags)) {
+ if (test_and_clear_bit(BK_MAP_DIRTY, &b->flags)) {
if (!b->vm_area.ops) {
pr_err("%s: "
"vm_area ops must exist for lazy maps\n",
@@ -689,7 +690,7 @@ static int _iovmm_client_lock(struct tegra_iovmm_client *client)
}
}
}
- set_bit(CL_locked, &client->flags);
+ set_bit(CL_LOCKED, &client->flags);
up_write(&domain->map_lock);
return 0;
}
@@ -724,7 +725,7 @@ void tegra_iovmm_client_unlock(struct tegra_iovmm_client *client)
if (!client)
return;
- if (!test_and_clear_bit(CL_locked, &client->flags)) {
+ if (!test_and_clear_bit(CL_LOCKED, &client->flags)) {
pr_err("unlocking unlocked client %s\n", client->name);
return;
}
@@ -779,7 +780,7 @@ void tegra_iovmm_free_client(struct tegra_iovmm_client *client)
domain = client->domain;
dev = domain->dev;
- if (test_and_clear_bit(CL_locked, &client->flags)) {
+ if (test_and_clear_bit(CL_LOCKED, &client->flags)) {
pr_err("freeing locked client %s\n", client->name);
if (!atomic_dec_return(&domain->locks)) {
down_write(&domain->map_lock);