/* sysdeps/i386/bits/atomic.h  (fedora-glibc-20041005T0745 snapshot)  */
/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <stdint.h>


typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;


#ifndef LOCK_PREFIX
# ifdef UP
#  define LOCK_PREFIX   /* nothing */
# else
#  define LOCK_PREFIX "lock;"
# endif
#endif
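
/* On uniprocessor (UP) builds the prefix is dropped: string literal
   concatenation turns e.g. LOCK_PREFIX "xaddl %0, %1" into plain
   "xaddl %0, %1".  On SMP builds the "lock;" prefix makes each
   read-modify-write below atomic with respect to other processors.  */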

/* The plain i386 has no cmpxchg* and xadd* instructions.  Rather than
   making everything non-atomic, the atomic_* macros that can be
   implemented with i386-only instructions use those instructions
   atomically; the remaining macros are non-atomic.  When built for use
   in nscd, use the i486+ instructions if the CPU turns out to be an
   i486 or better.  */

#ifdef IS_IN_nscd

extern int has_cmpxchg attribute_hidden;

#define atomic_supports_shared has_cmpxchg

#define atomic_init_nscd \
int has_cmpxchg attribute_hidden;                                             \
static void __attribute__((constructor))                                     \
init_has_cmpxchg (void)                                                       \
{                                                                             \
  int fl1, fl2;                                                               \
  __asm__ ("pushfl; pushfl; popl %0; movl %0,%1; xorl %2,%0;"                 \
           "pushl %0; popfl; pushfl; popl %0; popfl"                          \
           : "=&r" (fl1), "=&r" (fl2) : "i" (0x00040000));                    \
  if ((fl1 ^ fl2) & 0x00040000)                                               \
    has_cmpxchg = 1;                                                          \
}
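
/* The constructor above tries to toggle bit 18 of EFLAGS (0x00040000,
   the AC/alignment-check flag).  The i386 keeps this bit hardwired to
   zero; the i486 and later allow it to be flipped, and every CPU that
   can flip it also implements cmpxchg and xadd, so the flip doubles as
   a cmpxchg probe.  */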

# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     if (__builtin_expect (has_cmpxchg, 1))                                   \
       __asm __volatile (LOCK_PREFIX "cmpxchgb %b2, %1"                       \
                         : "=a" (ret), "=m" (*mem)                            \
                         : "q" (newval), "m" (*mem), "0" (oldval));           \
     else                                                                     \
       {                                                                      \
         ret = *mem;                                                          \
         if (ret == oldval)                                                   \
           *mem = (newval);                                                   \
       }                                                                      \
     ret; })

# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     if (__builtin_expect (has_cmpxchg, 1))                                   \
       __asm __volatile (LOCK_PREFIX "cmpxchgw %w2, %1"                       \
                         : "=a" (ret), "=m" (*mem)                            \
                         : "r" (newval), "m" (*mem), "0" (oldval));           \
     else                                                                     \
       {                                                                      \
         ret = *mem;                                                          \
         if (ret == oldval)                                                   \
           *mem = (newval);                                                   \
       }                                                                      \
     ret; })

# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                     \
     if (__builtin_expect (has_cmpxchg, 1))                                   \
       __asm __volatile (LOCK_PREFIX "cmpxchgl %2, %1"                        \
                         : "=a" (ret), "=m" (*mem)                            \
                         : "r" (newval), "m" (*mem), "0" (oldval));           \
     else                                                                     \
       {                                                                      \
         ret = *mem;                                                          \
         if (ret == oldval)                                                   \
           *mem = (newval);                                                   \
       }                                                                      \
     ret; })
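
/* Illustrative sketch, not part of the original header: the
   val-returning CAS above is normally wrapped in a retry loop.  A
   hypothetical atomic-OR built on the 32-bit variant could look like
   this (the __example_ name is made up for illustration).  */
static inline int
__example_atomic_or_32 (volatile int *mem, int mask)
{
  int old = *mem;
  int expected;
  do
    {
      expected = old;
      old = __arch_compare_and_exchange_val_32_acq (mem, expected | mask,
                                                    expected);
    }
  while (old != expected);      /* Someone else wrote *mem; retry.  */
  return old;                   /* Value before our OR took effect.  */
}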

/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  The dummy assignments below only keep the
   arguments referenced so callers compile; any runtime use aborts.  */
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })


# define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*mem) __result;                                                \
     __typeof (value) __addval = (value);                                     \
     if (__builtin_expect (! has_cmpxchg, 0))                                 \
       {                                                                      \
         __typeof (mem) __memp = (mem);                                       \
         __result = *__memp;                                                  \
         *__memp += __addval;                                                 \
       }                                                                      \
     else if (sizeof (*mem) == 1)                                             \
       __asm __volatile (LOCK_PREFIX "xaddb %b0, %1"                          \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "xaddw %w0, %1"                          \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "xaddl %0, %1"                           \
                         : "=r" (__result), "=m" (*mem)                       \
                         : "0" (__addval), "m" (*mem));                       \
     else                                                                     \
       {                                                                      \
         __typeof (mem) __memp = (mem);                                       \
         __typeof (*mem) __tmpval;                                            \
         __result = *__memp;                                                  \
         do                                                                   \
           __tmpval = __result;                                               \
         while ((__result = __arch_compare_and_exchange_val_64_acq            \
                 (__memp, __result + __addval, __result)) != __tmpval);       \
       }                                                                      \
     __result; })
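
/* A minimal usage sketch (the helper name is hypothetical):
   atomic_exchange_and_add returns the value the memory held *before*
   the addition, which is exactly what a ticket-style counter wants.  */
static inline int
__example_take_ticket (int *next_ticket)
{
  /* The caller gets the old counter value as its ticket; the stored
     counter advances by one.  */
  return atomic_exchange_and_add (next_ticket, 1);
}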

#else

/* By default we have no support for atomic operations, so we define
   them as non-atomic.  If this is ever a problem somebody will have to
   come up with real definitions.  */

/* The only basic operation needed is compare and exchange.  */
#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({ __typeof (mem) __gmemp = (mem);                                          \
     __typeof (*mem) __gret = *__gmemp;                                       \
     __typeof (*mem) __gnewval = (newval);                                    \
                                                                              \
     if (__gret == (oldval))                                                  \
       *__gmemp = __gnewval;                                                  \
     __gret; })

#define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ __typeof (mem) __gmemp = (mem);                                          \
     __typeof (*mem) __gnewval = (newval);                                    \
                                                                              \
     *__gmemp == (oldval) ? (*__gmemp = __gnewval, 0) : 1; })

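/* Sketch of how the two fallbacks relate (hypothetical helper name):
   the _val form returns the previous contents, the _bool form returns
   0 on success and 1 on failure.  */
static inline int
__example_try_claim (int *flag)
{
  /* Nonzero result means we transitioned *flag from 0 to 1.  */
  return atomic_compare_and_exchange_bool_acq (flag, 1, 0) == 0;
}
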
#endif

/* Note that we need no lock prefix: xchg with a memory operand is
   implicitly locked.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile ("xchgb %b0, %1"                                      \
                         : "=q" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile ("xchgw %w0, %1"                                      \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile ("xchgl %0, %1"                                       \
                         : "=r" (result), "=m" (*mem)                         \
                         : "0" (newvalue), "m" (*mem));                       \
     else                                                                     \
       {                                                                      \
         result = 0;                                                          \
         abort ();                                                            \
       }                                                                      \
     result; })


#define atomic_add(mem, value) \
  (void) ({ if (__builtin_constant_p (value) && (value) == 1)                 \
              atomic_increment (mem);                                         \
            else if (__builtin_constant_p (value) && (value) == -1)           \
              atomic_decrement (mem);                                         \
            else if (sizeof (*mem) == 1)                                      \
              __asm __volatile (LOCK_PREFIX "addb %b1, %0"                    \
                                : "=m" (*mem)                                 \
                                : "iq" (value), "m" (*mem));                  \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "addw %w1, %0"                    \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "m" (*mem));                  \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "addl %1, %0"                     \
                                : "=m" (*mem)                                 \
                                : "ir" (value), "m" (*mem));                  \
            else                                                              \
              {                                                               \
                __typeof (value) __addval = (value);                          \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval + __addval, __oldval)) != __tmpval); \
              }                                                               \
            })
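
/* The __builtin_constant_p tests above are resolved at compile time:
   adding a literal 1 or -1 folds into the shorter inc/dec forms, and
   the whole if/else chain costs nothing at run time.  */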


#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; sets %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; sets %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })
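
/* Usage sketch (hypothetical helper): atomic_add_negative reports
   whether the sum went negative, which suits semaphore-style
   accounting.  */
static inline int
__example_sem_take (int *count)
{
  /* Take one unit; a nonzero result means the count dropped below
     zero, i.e. the caller would have to block.  */
  return atomic_add_negative (count, -1);
}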


#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "addb %b2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "iq" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "addw %w2, %0; setz %1"                  \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "addl %2, %0; setz %1"                   \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "ir" (value), "m" (*mem));                         \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_increment(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "incb %b0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "incw %w0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "incl %0"                         \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval + 1, __oldval)) != __tmpval);        \
              }                                                               \
            })


#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "incb %b0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "incw %w0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "incl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_decrement(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "decb %b0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "decw %w0"                        \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "decl %0"                         \
                                : "=m" (*mem)                                 \
                                : "m" (*mem));                                \
            else                                                              \
              {                                                               \
                __typeof (mem) __memp = (mem);                                \
                __typeof (*mem) __oldval = *__memp;                           \
                __typeof (*mem) __tmpval;                                     \
                do                                                            \
                  __tmpval = __oldval;                                        \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq     \
                       (__memp, __oldval - 1, __oldval)) != __tmpval);        \
              }                                                               \
            })


#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       __asm __volatile (LOCK_PREFIX "decb %b0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "decw %w0; sete %1"                      \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "decl %0; sete %1"                       \
                         : "=m" (*mem), "=qm" (__result)                      \
                         : "m" (*mem));                                       \
     else                                                                     \
       abort ();                                                              \
     __result; })
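
/* Classic refcounting use of atomic_decrement_and_test (sketch; the
   struct and destroy function are hypothetical).  */
struct __example_obj { int refs; /* ...payload... */ };
extern void __example_obj_destroy (struct __example_obj *);

static inline void
__example_obj_put (struct __example_obj *obj)
{
  /* Nonzero result means we just dropped the last reference.  */
  if (atomic_decrement_and_test (&obj->refs))
    __example_obj_destroy (obj);
}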


#define atomic_bit_set(mem, bit) \
  (void) ({ if (sizeof (*mem) == 1)                                           \
              __asm __volatile (LOCK_PREFIX "orb %b2, %0"                     \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "iq" (1 << (bit)));             \
            else if (sizeof (*mem) == 2)                                      \
              __asm __volatile (LOCK_PREFIX "orw %w2, %0"                     \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "ir" (1 << (bit)));             \
            else if (sizeof (*mem) == 4)                                      \
              __asm __volatile (LOCK_PREFIX "orl %2, %0"                      \
                                : "=m" (*mem)                                 \
                                : "m" (*mem), "ir" (1 << (bit)));             \
            else                                                              \
              abort ();                                                       \
            })


#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;                                                  \
     if (sizeof (*mem) == 1)                                                  \
       /* bts has no byte form.  */                                           \
       abort ();                                                              \
     else if (sizeof (*mem) == 2)                                             \
       __asm __volatile (LOCK_PREFIX "btsw %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else if (sizeof (*mem) == 4)                                             \
       __asm __volatile (LOCK_PREFIX "btsl %3, %1; setc %0"                   \
                         : "=q" (__result), "=m" (*mem)                       \
                         : "m" (*mem), "ir" (bit));                           \
     else                                                                     \
       abort ();                                                              \
     __result; })


#define atomic_delay() asm ("rep; nop")
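
/* "rep; nop" encodes the PAUSE hint (0xF3 0x90): a plain nop on older
   CPUs, a spin-wait hint on newer ones.  A minimal sketch of the
   intended use, combined with the implicitly locked xchg above (the
   helper names are hypothetical, not part of this header):  */
static inline void
__example_spin_lock (int *lock)
{
  /* atomic_exchange_acq returns the previous value; 0 means we own
     the lock now.  */
  while (atomic_exchange_acq (lock, 1) != 0)
    atomic_delay ();            /* Ease off while someone else holds it.  */
}

static inline void
__example_spin_unlock (int *lock)
{
  /* x86 stores are not reordered past earlier operations, so a plain
     volatile store releases the lock.  */
  *(volatile int *) lock = 0;
}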