Extend comment for __arch_compare_and_exchange_64_acq.
[kopensolaris-gnu/glibc.git] / nptl / sysdeps / i386 / i486 / bits / atomic.h
1 /* Copyright (C) 2002, 2003 Free Software Foundation, Inc.
2    This file is part of the GNU C Library.
3    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; if not, write to the Free
17    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
18    02111-1307 USA.  */
19
20 #include <stdint.h>
21
22
/* Fixed-width and "fast" integer type aliases that the generic
   atomic machinery expects every architecture header to provide.
   All map directly onto the <stdint.h> types.  */
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

/* Pointer-sized and maximum-width variants.  */
typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
47
48
/* Bus-lock prefix prepended to the read-modify-write instructions
   below to make them atomic with respect to other processors.  On
   uniprocessor (UP) builds the prefix is omitted: a single CPU cannot
   race against itself, and the locked form is more expensive.  */
#ifndef LOCK
# ifdef UP
#  define LOCK  /* nothing */
# else
#  define LOCK "lock;"
# endif
#endif
56
57
/* 8-bit compare-and-exchange: if *MEM equals OLDVAL, store NEWVAL
   into *MEM.  Returns zero iff the exchange took place: cmpxchg sets
   ZF on success, so "setne" yields 0 on success, 1 on failure.
   OLDVAL must live in %eax ("0" ties it to the "=a" output), as
   cmpxchg requires.  The "q" constraint on NEWVAL is essential for
   the byte form: only %eax..%edx have 8-bit subregisters on i386.  */
#define __arch_compare_and_exchange_8_acq(mem, newval, oldval) \
  ({ unsigned char ret;                                                       \
     __asm __volatile (LOCK "cmpxchgb %b2, %1; setne %0"                      \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "q" (newval), "1" (*mem), "0" (oldval));             \
     ret; })
64
/* 16-bit compare-and-exchange; returns zero iff the exchange took
   place (see the 8-bit variant for the setne convention).  Plain "r"
   suffices here since every general register has a 16-bit form.  */
#define __arch_compare_and_exchange_16_acq(mem, newval, oldval) \
  ({ unsigned char ret;                                                       \
     __asm __volatile (LOCK "cmpxchgw %w2, %1; setne %0"                      \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "1" (*mem), "0" (oldval));             \
     ret; })
71
/* 32-bit compare-and-exchange; returns zero iff the exchange took
   place (nonzero means *MEM did not equal OLDVAL and was left
   unchanged).  */
#define __arch_compare_and_exchange_32_acq(mem, newval, oldval) \
  ({ unsigned char ret;                                                       \
     __asm __volatile (LOCK "cmpxchgl %2, %1; setne %0"                       \
                       : "=a" (ret), "=m" (*mem)                              \
                       : "r" (newval), "1" (*mem), "0" (oldval));             \
     ret; })
78
/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not in the moment.  Using it would mean causing portability
   problems since not many other 32-bit architectures have support for
   such an operation.  So don't define any code for now.  If it is
   really going to be used the code below can be used on Intel Pentium
   and later, but NOT on i486.  */
#if 1
/* Active definition: any caller reaching the 64-bit case aborts at
   run time (the ", 0" gives the expression an integer value so it
   still type-checks in boolean contexts).  */
# define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
  (abort (), 0)
#else
# ifdef __PIC__
/* NOTE(review): dead code — the "#if 1" above always selects the
   abort stub.  If ever enabled this needs auditing: "cmpxchg8b %2,
   %1" does not look like valid syntax (cmpxchg8b takes a single
   memory operand), and cmpxchg8b needs a Pentium, not an i486.  The
   xchgl dance exists because %ebx is the PIC GOT pointer and must be
   preserved, so the low word of NEWVAL is swapped in and out of it;
   cmpxchg8b compares %edx:%eax with the memory operand and, on
   match, stores %ecx:%ebx.  */
#  define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
  ({ unsigned char ret;                                                       \
     int ignore;                                                              \
     __asm __volatile ("xchgl %3, %%ebx\n\t"                                  \
                       LOCK "cmpxchg8b %2, %1\n\t"                            \
                       "setne %0\n\t"                                         \
                       "xchgl %3, %%ebx"                                      \
                       : "=a" (ret), "=m" (*mem), "=d" (ignore)               \
                       : "DS" (((unsigned long long int) (newval))            \
                               & 0xffffffff),                                 \
                         "c" (((unsigned long long int) (newval)) >> 32),     \
                         "1" (*mem), "0" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "2" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })
# else
/* NOTE(review): dead code, same caveat as the PIC variant above about
   the "cmpxchg8b %2, %1" operand syntax.  OLDVAL is split across
   %edx:%eax and NEWVAL across %ecx:%ebx, as cmpxchg8b requires.  */
#  define __arch_compare_and_exchange_64_acq(mem, newval, oldval) \
  ({ unsigned char ret;                                                       \
     int ignore;                                                              \
     __asm __volatile (LOCK "cmpxchg8b %2, %1; setne %0"                      \
                       : "=a" (ret), "=m" (*mem), "=d" (ignore)               \
                       : "b" (((unsigned long long int) (newval))             \
                              & 0xffffffff),                                  \
                          "c" (((unsigned long long int) (newval)) >> 32),    \
                         "1" (*mem), "0" (((unsigned long long int) (oldval)) \
                                          & 0xffffffff),                      \
                         "2" (((unsigned long long int) (oldval)) >> 32));    \
     ret; })
# endif
#endif
120
121
/* Atomically add VALUE to *MEM and return the value *MEM held before
   the addition ("lock xadd" leaves the old memory value in the
   register operand).  1/2/4-byte objects use xadd directly; the
   8-byte case falls back to a compare-and-exchange loop.  */
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*mem) result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       /* "q", not "r": %b0 needs a byte subregister and only          \
          %eax..%edx have one on i386.  */                             \
       __asm __volatile (LOCK "xaddb %b0, %1"                                 \
                         : "=q" (result), "=m" (*mem)                         \
                         : "0" (value), "1" (*mem));                          \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "xaddw %w0, %1"                                 \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (value), "1" (*mem));                          \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "xaddl %0, %1"                                  \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (value), "1" (*mem));                          \
     else                                                                     \
       {                                                                      \
         __typeof (value) addval = (value);                                   \
         __typeof (*mem) oldval;                                              \
         __typeof (mem) memp = (mem);                                         \
         /* The CAS macro returns nonzero iff the exchange FAILED      \
            (setne convention), so retry while it is nonzero.  RESULT  \
            is the pre-add value, matching the xadd cases above.  */   \
         do                                                                   \
           oldval = *memp;                                                    \
         while (__arch_compare_and_exchange_64_acq (memp, oldval + addval,    \
                                                    oldval));                 \
         result = oldval;                                                     \
       }                                                                      \
     result; })
146
147
/* Atomically add VALUE to *MEM; no result.  Compile-time constant
   additions of +1 and -1 are strength-reduced to atomic_increment
   and atomic_decrement (lock inc/dec are shorter than lock add).
   The original second branch tested "(value) == 1" again, so the
   decrement path was unreachable.  */
#define atomic_add(mem, value) \
  (void) ({ if (__builtin_constant_p (value) && (value) == 1)                 \
              atomic_increment (mem);                                         \
            else if (__builtin_constant_p (value) && (value) == -1)           \
              atomic_decrement (mem);                                         \
            else if (sizeof (*mem) == 1)                                      \
              /* "iq", not "ir": %b1 needs a byte subregister and      \
                 only %eax..%edx have one.  */                         \
              __asm __volatile (LOCK "addb %b1, %0"                           \
                                : "=m" (*mem)                                 \
                                : "iq" (value), "0" (*mem));                  \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK "addw %w1, %0"                           \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "0" (*mem));                  \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK "addl %1, %0"                            \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "0" (*mem));                  \
            else                                                              \
              {                                                               \
                __typeof (value) addval = (value);                            \
                __typeof (*mem) oldval;                                       \
                __typeof (mem) memp = (mem);                                  \
                /* The CAS macro returns nonzero iff the exchange      \
                   FAILED, so retry while it is nonzero.  */           \
                do                                                            \
                  oldval = *memp;                                             \
                while (__arch_compare_and_exchange_64_acq (memp,              \
                                                           oldval + addval,   \
                                                           oldval));          \
              }                                                               \
            })
177
178
/* Atomically add VALUE to *MEM; return nonzero iff the resulting
   value is negative (the sign flag left by the locked add).  The byte
   case correctly uses "iq" since %b2 needs one of the byte-capable
   registers %eax..%edx.  Only 1/2/4-byte objects are supported;
   anything else aborts.  */
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK "addb %b2, %0; sets %1"                         \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "0" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "addw %w2, %0; sets %1"                         \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "0" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "addl %2, %0; sets %1"                          \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "0" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })
196
197
/* Atomically add VALUE to *MEM; return nonzero iff the resulting
   value is zero (the zero flag left by the locked add).  Only
   1/2/4-byte objects are supported; anything else aborts.  */
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       /* "iq", not "ir": %b2 needs a byte subregister, which only     \
          %eax..%edx provide (cf. atomic_add_negative).  */            \
       __asm __volatile (LOCK "addb %b2, %0; setz %1"                         \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "0" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "addw %w2, %0; setz %1"                         \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "0" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "addl %2, %0; setz %1"                          \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "0" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })
215
216
/* Atomically increment *MEM by one; no result.  1/2/4-byte objects
   use "lock inc"; the 8-byte case falls back to a 64-bit
   compare-and-exchange loop.  */
#define atomic_increment(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK "incb %b0"                               \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK "incw %w0"                               \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK "incl %0"                                \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (*mem) oldval;                                       \
                __typeof (mem) memp = (mem);                                  \
                /* The CAS macro returns nonzero iff the exchange      \
                   FAILED (setne convention), so retry while it is     \
                   nonzero.  */                                        \
                do                                                            \
                  oldval = *memp;                                             \
                while (__arch_compare_and_exchange_64_acq (memp,              \
                                                           oldval + 1,        \
                                                           oldval));          \
              }                                                               \
            })
241
242
/* Atomically increment *MEM; return nonzero iff the new value is
   zero.  The size-override modifiers belong on the inc operand, not
   on the setcc operand: __result is an unsigned char, and "sete %w1"
   would emit a 16-bit register name, which setcc cannot take.  This
   now matches atomic_decrement_and_test below.  */
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK "incb %b0; sete %1"                             \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "incw %w0; sete %1"                             \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "incl %0; sete %1"                              \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })
260
261
/* Atomically decrement *MEM by one; no result.  1/2/4-byte objects
   use "lock dec"; the 8-byte case falls back to a 64-bit
   compare-and-exchange loop.  */
#define atomic_decrement(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK "decb %b0"                               \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK "decw %w0"                               \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK "decl %0"                                \
                                : "=m" (*mem)                                 \
                                : "0" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (*mem) oldval;                                       \
                __typeof (mem) memp = (mem);                                  \
                /* The CAS macro returns nonzero iff the exchange      \
                   FAILED (setne convention), so retry while it is     \
                   nonzero.  */                                        \
                do                                                            \
                  oldval = *memp;                                             \
                while (__arch_compare_and_exchange_64_acq (memp,              \
                                                           oldval - 1,        \
                                                           oldval));          \
              }                                                               \
            })
286
287
/* Atomically decrement *MEM; return nonzero iff the new value is
   zero (zero flag of the locked dec).  Only 1/2/4-byte objects are
   supported; anything else aborts.  */
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK "decb %b0; sete %1"                             \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "decw %w0; sete %1"                             \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "decl %0; sete %1"                              \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "0" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })
305
306
/* Atomically set bit BIT of *MEM via a locked OR with the mask
   1 << BIT.  BIT must be a compile-time constant (the "i"
   constraint) small enough that the mask fits the operand size.
   Only 1/2/4-byte objects are supported; anything else aborts.  */
#define atomic_bit_set(mem, bit) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK "orb %b2, %0"                            \
                                : "=m" (*mem)                                 \
                                : "0" (*mem), "i" (1 << (bit)));              \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK "orw %w2, %0"                            \
                                : "=m" (*mem)                                 \
                                : "0" (*mem), "i" (1 << (bit)));              \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK "orl %2, %0"                             \
                                : "=m" (*mem)                                 \
                                : "0" (*mem), "i" (1 << (bit)));              \
            else                                                              \
              abort ();                                                       \
            })
323
324
/* Atomically set bit BIT of *MEM; return nonzero iff the bit was
   already set (carry flag left by "bts").  BIT must be a
   compile-time constant ("i" constraint).  There is no byte form of
   BTS (the instruction only exists for 16- and 32-bit operands), so
   the 1-byte case aborts like the other unsupported sizes -- the
   original emitted "btsb", which could never assemble.  */
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       abort ();                                                              \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK "btsw %3, %1; setc %0"                          \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "1" (*mem), "i" (bit));                            \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK "btsl %3, %1; setc %0"                          \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "1" (*mem), "i" (bit));                            \
     else                                                                     \
       abort ();                                                              \
     __result; })