path: root/arch/blackfin/include/asm/atomic.h
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__

#include <asm/system.h>	/* local_irq_XXX() */

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * Generally we are not concerned with SMP Blackfin systems, so we
 * don't have to deal with that.
 *
 * Tony Kou (tonyko@lineo.ca)   Lineo Inc.   2001
 */
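
/*
 * A minimal usage sketch for the resource-counting case mentioned
 * above (illustrative only; release_hw() is a hypothetical cleanup
 * helper, not part of this header):
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refcnt);			take a reference
 *	...
 *	if (atomic_dec_and_test(&refcnt))	drop it; free on last put
 *		release_hw();
 */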

typedef struct {
	int counter;
} atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

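/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  Implemented by masking interrupts around
 * a plain read-modify-write, which is sufficient on UP Blackfin.
 */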
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	local_irq_restore(flags);
}

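/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */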
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	local_irq_restore(flags);
}

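/*
 * atomic_add_return - add integer and return result
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the new value.
 */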
static inline int atomic_add_return(int i, atomic_t *v)
{
	int __temp;
	unsigned long flags;

	local_irq_save(flags);
	v->counter += i;
	__temp = v->counter;
	local_irq_restore(flags);

	return __temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
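
/*
 * atomic_sub_return - subtract integer and return result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the new value.
 */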
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int __temp;
	unsigned long flags;

	local_irq_save(flags);
	v->counter -= i;
	__temp = v->counter;
	local_irq_restore(flags);

	return __temp;
}

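/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */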
static inline void atomic_inc(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter++;
	local_irq_restore(flags);
}

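/*
 * atomic_cmpxchg/atomic_xchg simply wrap the generic cmpxchg()/xchg()
 * helpers on the counter field.
 */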
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

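/*
 * atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u, using a
 * cmpxchg() retry loop.  Returns non-zero if @v was not @u, and zero
 * otherwise.
 */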
#define atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c != (u);						\
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

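/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */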
static inline void atomic_dec(volatile atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter--;
	local_irq_restore(flags);
}

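/*
 * atomic_clear_mask - clear bits in atomic variable
 * @mask: bits to clear
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v.
 */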
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter &= ~mask;
	local_irq_restore(flags);
}

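/*
 * atomic_set_mask - set bits in atomic variable
 * @mask: bits to set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v.
 */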
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	local_irq_save(flags);
	v->counter |= mask;
	local_irq_restore(flags);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

#include <asm-generic/atomic.h>

#endif				/* __ARCH_BLACKFIN_ATOMIC__ */