/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 * Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */

#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/system.h>

#define ATOMIC_INIT(i)  { (i) }

extern int __atomic_add_return(int, atomic_t *);
extern int atomic_cmpxchg(atomic_t *, int, int);
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
extern int atomic_add_unless(atomic_t *, int, int);
extern void atomic_set(atomic_t *, int);
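
/* Illustration only (not how the sparc32 port implements it; the real
 * atomic_add_unless() is out of line): the contract of atomic_add_unless()
 * expressed as a cmpxchg loop, i.e. add @a to @v unless @v equals @u and
 * return non-zero iff the add happened.  Kept under "#if 0" so it is never
 * built; the _sketch name is made up for this example.
 */
#if 0
static int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race, retry with the fresh value */
	return c != u;
}
#endif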

#define atomic_read(v)          (*(volatile int *)&(v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
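
/* Illustrative sketch (not from the original header): the usual
 * reference-count pattern built on the primitives above.  The structure
 * and function names are hypothetical; the block is under "#if 0" so it
 * is never compiled.
 */
#if 0
struct example_obj {
	atomic_t refcount;	/* e.g. initialised with ATOMIC_INIT(1) */
};

static int example_get(struct example_obj *p)
{
	/* Take a reference only if the count has not already hit zero. */
	return atomic_inc_not_zero(&p->refcount);
}

static void example_put(struct example_obj *p,
			void (*release)(struct example_obj *))
{
	/* Drop a reference; whoever reaches zero releases the object. */
	if (atomic_dec_and_test(&p->refcount))
		release(p);
}
#endif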

/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

/* On UP there is no lock byte to embed; the counter is stored unshifted. */
#define ATOMIC24_INIT(i)  { (i) }
#define atomic24_read(v)          ((v)->counter)
#define atomic24_set(v, i)        (((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is to embed the spin lock byte within
 * the word, using the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic24_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
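
/* Worked example (illustration only): ATOMIC24_INIT(-5) stores
 * -5 << 8 == 0xfffffb00; atomic24_read() below undoes that with an
 * arithmetic shift, (int)0xfffffb00 >> 8 == -5, so the lock byte is
 * discarded and the counter's sign survives.
 */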

#define ATOMIC24_INIT(i)	{ ((i) << 8) }

static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin while the low lock byte is held, then strip it off. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}

#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif

static inline int __atomic24_add(int i, atomic24_t *v)
{
	/* The out-of-line assembler helper takes its arguments in fixed
	 * global registers: %g1 holds the counter address, %g2 the
	 * increment (and carries the result back); %g3, %g4 and %g7 are
	 * clobbered as temporaries.
	 */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"		/* stash the return address for the helper */
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"	/* delay slot: bias %o7 for the helper's return */
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}

static inline int __atomic24_sub(int i, atomic24_t *v)
{
	/* Same fixed-register calling convention as __atomic24_add above. */
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
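
/* Conceptual sketch only (assumed behaviour; the real routines live in
 * arch/sparc/lib/atomic.S and also deal with interrupts): the helpers
 * called above spin on the low lock byte, update the signed 24-bit value
 * held in the upper bits, and release the lock when they store the
 * shifted result back, since that write leaves the low byte zero.
 * Roughly, in C, with a hypothetical acquire_lock_byte() helper:
 */
#if 0
static int atomic24_update_sketch(int i, atomic24_t *v)
{
	int new;

	while (acquire_lock_byte(&v->counter))	/* hypothetical ldstub-style spin */
		;
	new = (v->counter >> 8) + i;	/* signed 24-bit value plus argument */
	v->counter = new << 8;		/* storing the shifted value clears the lock byte */
	return new;
}
#endif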

#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !(__KERNEL__) */

#include <asm-generic/atomic-long.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */