path: root/include/asm-i386/mutex.h
/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#include "asm/alternative.h"

/**
 *  __mutex_fastpath_lock - try to take the lock by moving the count
 *                          from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function MUST leave the value lower than 1
 * even when the "1" assertion wasn't true.
 */
#define __mutex_fastpath_lock(count, fail_fn)				\
do {									\
	unsigned int dummy;						\
									\
	typecheck(atomic_t *, count);					\
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
									\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   decl (%%eax)	\n"			\
			"   jns 1f		\n"			\
			"   call "#fail_fn"	\n"			\
			"1:			\n"			\
									\
		:"=a" (dummy)						\
		: "a" (count)						\
		: "memory", "ecx", "edx");				\
} while (0)
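
/*
 * Illustrative sketch only (an assumption, not part of this header): the
 * generic mutex code is expected to drive this fastpath roughly as below,
 * passing its out-of-line contention handler as <fail_fn>.  The names used
 * here are placeholders for illustration.
 *
 *	fastcall void __mutex_lock_slowpath(atomic_t *lock_count);
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		// decrement lock->count; enter the slowpath if it was not 1
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */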


/**
 *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                 from 1 to a 0 value
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
 * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count,
			     int fastcall (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else
		return 0;
}
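
/*
 * Illustrative sketch only (an assumption): the retval variant is meant for
 * interruptible locking, where the slowpath returns 0 on success or a
 * negative error (e.g. -EINTR) that is handed straight back to the caller.
 * The names below are placeholders for illustration.
 *
 *	fastcall int __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 *
 *	int mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		might_sleep();
 *		return __mutex_fastpath_lock_retval(&lock->count,
 *					__mutex_lock_interruptible_slowpath);
 *	}
 */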

/**
 *  __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
 *  @count: pointer of type atomic_t
 *  @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value
 * to 1, or to set it to a value lower than 1.
 *
 * If the implementation sets it to a value lower than 1, the
 * __mutex_slowpath_needs_to_unlock() macro needs to return 1; otherwise
 * it needs to return 0.
 */
#define __mutex_fastpath_unlock(count, fail_fn)				\
do {									\
	unsigned int dummy;						\
									\
	typecheck(atomic_t *, count);					\
	typecheck_fn(fastcall void (*)(atomic_t *), fail_fn);		\
									\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   incl (%%eax)	\n"			\
			"   jg	1f		\n"			\
			"   call "#fail_fn"	\n"			\
			"1:			\n"			\
									\
		:"=a" (dummy)						\
		: "a" (count)						\
		: "memory", "ecx", "edx");				\
} while (0)
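
/*
 * Illustrative sketch only (an assumption): the unlock fastpath is expected
 * to be driven from the generic mutex_unlock(), with any waiter wakeup
 * handled by the out-of-line slowpath.  Names are placeholders.
 *
 *	fastcall void __mutex_unlock_slowpath(atomic_t *lock_count);
 *
 *	void mutex_unlock(struct mutex *lock)
 *	{
 *		// increment lock->count; call the slowpath if it did not become 1
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 */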

#define __mutex_slowpath_needs_to_unlock()	1
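
/*
 * Sketch of how the generic slowpath is expected to consume this flag (an
 * assumption): since the failed unlock fastpath above may leave the count
 * below 1, the slowpath has to release the lock itself, e.g.
 *
 *	if (__mutex_slowpath_needs_to_unlock())
 *		atomic_set(&lock->count, 1);
 */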

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 *
 *  @count: pointer of type atomic_t
 *  @fail_fn: fallback function
 *
 * Change the count from 1 to a value lower than 1, and return 0 (failure)
 * if it wasn't 1 originally, or return 1 (success) otherwise. This function
 * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
 * Additionally, if the value was < 0 originally, this function must not
 * leave it at 0 on failure.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induces a false contention state.  It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is just
	 * as efficient as (and simpler than) a 'destructive' probing of the
	 * mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	return 0;
#else
	return fail_fn(count);
#endif
}
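
/*
 * Illustrative sketch only (an assumption): mutex_trylock() is expected to
 * map directly onto this helper, with the generic, spinlock-based probe
 * passed as the fallback for !__HAVE_ARCH_CMPXCHG configurations.  Names
 * are placeholders for illustration.
 *
 *	fastcall int __mutex_trylock_slowpath(atomic_t *lock_count);
 *
 *	int mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */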

#endif