patch atomic-8-bit
--- old/usr/src/uts/intel/asm/atomic.h
+++ new/usr/src/uts/intel/asm/atomic.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 #ifndef _ASM_ATOMIC_H
29 29 #define _ASM_ATOMIC_H
30 30
31 31 #include <sys/ccompile.h>
32 32 #include <sys/types.h>
33 33
34 34 #ifdef __cplusplus
35 35 extern "C" {
36 36 #endif
37 37
38 38 #if !defined(__lint) && defined(__GNUC__)
39 39
40 40 /* BEGIN CSTYLED */
41 41 /*
42 42 * This file contains a number of static inline functions implementing
43 43 * various atomic variable functions. Note that these are *not* all of the
44 44 * atomic_* functions as defined in usr/src/uts/common/sys/atomic.h. All
45 45 * possible atomic_* functions are implemented in usr/src/common/atomic in
46 46 * pure assembly. In the absence of an identically named function in this
47 47 * header file, any use of the function will result in the compiler emitting
48 48 * a function call as usual. On the other hand, if an identically named
49 49 * function exists in this header as a static inline, the compiler will
50 50 * inline its contents and the linker never sees the symbol reference. We
51 51 * use this to avoid implementing some of the more complex and less used
52 52 * functions and instead falling back to function calls. Note that in some
53 53 * cases (e.g., atomic_inc_64) we implement a static inline only on AMD64
54 54 * but not i386.
55 55 */
56 56
57 57 /*
58 58 * Instruction suffixes for various operand sizes (assuming AMD64)
59 59 */
60 60 #define SUF_8 "b"
61 61 #define SUF_16 "w"
62 62 #define SUF_32 "l"
63 63 #define SUF_64 "q"
64 64
65 65 #if defined(__amd64)
66 66 #define SUF_LONG SUF_64
67 67 #define SUF_PTR SUF_64
68 68 #define __ATOMIC_OP64(...) __ATOMIC_OPXX(__VA_ARGS__)
69 69 #elif defined(__i386)
70 70 #define SUF_LONG SUF_32
71 71 #define SUF_PTR SUF_32
72 72 #define __ATOMIC_OP64(...)
73 73 #else
74 74 #error "port me"
75 75 #endif
76 76
77 77 #if defined(__amd64) || defined(__i386)
78 78
79 79 #define __ATOMIC_OPXX(fxn, type, op) \
80 80 extern __GNU_INLINE void \
81 81 fxn(volatile type *target) \
82 82 { \
83 83 __asm__ __volatile__( \
84 84 "lock; " op " %0" \
85 85 : "+m" (*target)); \
86 86 }
87 87
88 88 __ATOMIC_OPXX(atomic_inc_8, uint8_t, "inc" SUF_8)
89 89 __ATOMIC_OPXX(atomic_inc_16, uint16_t, "inc" SUF_16)
90 90 __ATOMIC_OPXX(atomic_inc_32, uint32_t, "inc" SUF_32)
91 91 __ATOMIC_OP64(atomic_inc_64, uint64_t, "inc" SUF_64)
92 92 __ATOMIC_OPXX(atomic_inc_uchar, uchar_t, "inc" SUF_8)
93 93 __ATOMIC_OPXX(atomic_inc_ushort, ushort_t, "inc" SUF_16)
94 94 __ATOMIC_OPXX(atomic_inc_uint, uint_t, "inc" SUF_32)
95 95 __ATOMIC_OPXX(atomic_inc_ulong, ulong_t, "inc" SUF_LONG)
96 96
97 97 __ATOMIC_OPXX(atomic_dec_8, uint8_t, "dec" SUF_8)
98 98 __ATOMIC_OPXX(atomic_dec_16, uint16_t, "dec" SUF_16)
99 99 __ATOMIC_OPXX(atomic_dec_32, uint32_t, "dec" SUF_32)
100 100 __ATOMIC_OP64(atomic_dec_64, uint64_t, "dec" SUF_64)
101 101 __ATOMIC_OPXX(atomic_dec_uchar, uchar_t, "dec" SUF_8)
102 102 __ATOMIC_OPXX(atomic_dec_ushort, ushort_t, "dec" SUF_16)
103 103 __ATOMIC_OPXX(atomic_dec_uint, uint_t, "dec" SUF_32)
104 104 __ATOMIC_OPXX(atomic_dec_ulong, ulong_t, "dec" SUF_LONG)
105 105
106 106 #undef __ATOMIC_OPXX
107 107
108 -#define __ATOMIC_OPXX(fxn, type1, type2, op) \
108 +#define __ATOMIC_OPXX(fxn, type1, type2, op, reg) \
109 109 extern __GNU_INLINE void \
110 110 fxn(volatile type1 *target, type2 delta) \
111 111 { \
112 112 __asm__ __volatile__( \
113 113 "lock; " op " %1,%0" \
114 114 : "+m" (*target) \
115 - : "ir" (delta)); \
115 + : "i" reg (delta)); \
116 116 }
117 117
118 -__ATOMIC_OPXX(atomic_add_8, uint8_t, int8_t, "add" SUF_8)
119 -__ATOMIC_OPXX(atomic_add_16, uint16_t, int16_t, "add" SUF_16)
120 -__ATOMIC_OPXX(atomic_add_32, uint32_t, int32_t, "add" SUF_32)
121 -__ATOMIC_OP64(atomic_add_64, uint64_t, int64_t, "add" SUF_64)
122 -__ATOMIC_OPXX(atomic_add_char, uchar_t, signed char, "add" SUF_8)
123 -__ATOMIC_OPXX(atomic_add_short, ushort_t, short, "add" SUF_16)
124 -__ATOMIC_OPXX(atomic_add_int, uint_t, int, "add" SUF_32)
125 -__ATOMIC_OPXX(atomic_add_long, ulong_t, long, "add" SUF_LONG)
118 +__ATOMIC_OPXX(atomic_add_8, uint8_t, int8_t, "add" SUF_8, "q")
119 +__ATOMIC_OPXX(atomic_add_16, uint16_t, int16_t, "add" SUF_16, "r")
120 +__ATOMIC_OPXX(atomic_add_32, uint32_t, int32_t, "add" SUF_32, "r")
121 +__ATOMIC_OP64(atomic_add_64, uint64_t, int64_t, "add" SUF_64, "r")
122 +__ATOMIC_OPXX(atomic_add_char, uchar_t, signed char, "add" SUF_8, "q")
123 +__ATOMIC_OPXX(atomic_add_short, ushort_t, short, "add" SUF_16, "r")
124 +__ATOMIC_OPXX(atomic_add_int, uint_t, int, "add" SUF_32, "r")
125 +__ATOMIC_OPXX(atomic_add_long, ulong_t, long, "add" SUF_LONG, "r")
126 126
127 127 /*
128 128 * We don't use the above macro here because atomic_add_ptr has an
129 129 * inconsistent type. The first argument should really be a 'volatile void
130 130 * **'.
131 131 */
132 132 extern __GNU_INLINE void
133 133 atomic_add_ptr(volatile void *target, ssize_t delta)
134 134 {
135 135 volatile void **tmp = (volatile void **)target;
136 136
137 137 __asm__ __volatile__(
138 138 "lock; add" SUF_PTR " %1,%0"
139 139 : "+m" (*tmp)
140 140 : "ir" (delta));
141 141 }
142 142
143 -__ATOMIC_OPXX(atomic_or_8, uint8_t, uint8_t, "or" SUF_8)
144 -__ATOMIC_OPXX(atomic_or_16, uint16_t, uint16_t, "or" SUF_16)
145 -__ATOMIC_OPXX(atomic_or_32, uint32_t, uint32_t, "or" SUF_32)
146 -__ATOMIC_OP64(atomic_or_64, uint64_t, uint64_t, "or" SUF_64)
147 -__ATOMIC_OPXX(atomic_or_uchar, uchar_t, uchar_t, "or" SUF_8)
148 -__ATOMIC_OPXX(atomic_or_ushort, ushort_t, ushort_t, "or" SUF_16)
149 -__ATOMIC_OPXX(atomic_or_uint, uint_t, uint_t, "or" SUF_32)
150 -__ATOMIC_OPXX(atomic_or_ulong, ulong_t, ulong_t, "or" SUF_LONG)
143 +__ATOMIC_OPXX(atomic_or_8, uint8_t, uint8_t, "or" SUF_8, "q")
144 +__ATOMIC_OPXX(atomic_or_16, uint16_t, uint16_t, "or" SUF_16, "r")
145 +__ATOMIC_OPXX(atomic_or_32, uint32_t, uint32_t, "or" SUF_32, "r")
146 +__ATOMIC_OP64(atomic_or_64, uint64_t, uint64_t, "or" SUF_64, "r")
147 +__ATOMIC_OPXX(atomic_or_uchar, uchar_t, uchar_t, "or" SUF_8, "q")
148 +__ATOMIC_OPXX(atomic_or_ushort, ushort_t, ushort_t, "or" SUF_16, "r")
149 +__ATOMIC_OPXX(atomic_or_uint, uint_t, uint_t, "or" SUF_32, "r")
150 +__ATOMIC_OPXX(atomic_or_ulong, ulong_t, ulong_t, "or" SUF_LONG, "r")
151 151
152 -__ATOMIC_OPXX(atomic_and_8, uint8_t, uint8_t, "and" SUF_8)
153 -__ATOMIC_OPXX(atomic_and_16, uint16_t, uint16_t, "and" SUF_16)
154 -__ATOMIC_OPXX(atomic_and_32, uint32_t, uint32_t, "and" SUF_32)
155 -__ATOMIC_OP64(atomic_and_64, uint64_t, uint64_t, "and" SUF_64)
156 -__ATOMIC_OPXX(atomic_and_uchar, uchar_t, uchar_t, "and" SUF_8)
157 -__ATOMIC_OPXX(atomic_and_ushort, ushort_t, ushort_t, "and" SUF_16)
158 -__ATOMIC_OPXX(atomic_and_uint, uint_t, uint_t, "and" SUF_32)
159 -__ATOMIC_OPXX(atomic_and_ulong, ulong_t, ulong_t, "and" SUF_LONG)
152 +__ATOMIC_OPXX(atomic_and_8, uint8_t, uint8_t, "and" SUF_8, "q")
153 +__ATOMIC_OPXX(atomic_and_16, uint16_t, uint16_t, "and" SUF_16, "r")
154 +__ATOMIC_OPXX(atomic_and_32, uint32_t, uint32_t, "and" SUF_32, "r")
155 +__ATOMIC_OP64(atomic_and_64, uint64_t, uint64_t, "and" SUF_64, "r")
156 +__ATOMIC_OPXX(atomic_and_uchar, uchar_t, uchar_t, "and" SUF_8, "q")
157 +__ATOMIC_OPXX(atomic_and_ushort, ushort_t, ushort_t, "and" SUF_16, "r")
158 +__ATOMIC_OPXX(atomic_and_uint, uint_t, uint_t, "and" SUF_32, "r")
159 +__ATOMIC_OPXX(atomic_and_ulong, ulong_t, ulong_t, "and" SUF_LONG, "r")
160 160
161 161 #undef __ATOMIC_OPXX
162 162
163 163 #define __ATOMIC_OPXX(fxn, type, op, reg) \
164 164 extern __GNU_INLINE type \
165 165 fxn(volatile type *target, type cmp, type new) \
166 166 { \
167 167 type ret; \
168 168 __asm__ __volatile__( \
169 169 "lock; " op " %2,%0" \
170 170 : "+m" (*target), "=a" (ret) \
171 171 : reg (new), "1" (cmp) \
172 172 : "cc"); \
173 173 return (ret); \
174 174 }
175 175
176 176 __ATOMIC_OPXX(atomic_cas_8, uint8_t, "cmpxchg" SUF_8, "q")
177 177 __ATOMIC_OPXX(atomic_cas_16, uint16_t, "cmpxchg" SUF_16, "r")
178 178 __ATOMIC_OPXX(atomic_cas_32, uint32_t, "cmpxchg" SUF_32, "r")
179 179 __ATOMIC_OP64(atomic_cas_64, uint64_t, "cmpxchg" SUF_64, "r")
180 180 __ATOMIC_OPXX(atomic_cas_uchar, uchar_t, "cmpxchg" SUF_8, "q")
181 181 __ATOMIC_OPXX(atomic_cas_ushort, ushort_t, "cmpxchg" SUF_16, "r")
182 182 __ATOMIC_OPXX(atomic_cas_uint, uint_t, "cmpxchg" SUF_32, "r")
183 183 __ATOMIC_OPXX(atomic_cas_ulong, ulong_t, "cmpxchg" SUF_LONG, "r")
184 184
185 185 #undef __ATOMIC_OPXX
186 186
187 187 /*
188 188 * We don't use the above macro here because atomic_cas_ptr has an
189 189 * inconsistent type. The first argument should really be a 'volatile void
190 190 * **'.
191 191 */
192 192 extern __GNU_INLINE void *
193 193 atomic_cas_ptr(volatile void *target, void *cmp, void *new)
194 194 {
195 195 volatile void **tmp = (volatile void **)target;
196 196 void *ret;
197 197
198 198 __asm__ __volatile__(
199 199 "lock; cmpxchg" SUF_PTR " %2,%0"
200 200 : "+m" (*tmp), "=a" (ret)
201 201 : "r" (new), "1" (cmp)
202 202 : "cc");
203 203
204 204 return (ret);
205 205 }
206 206
207 207 #define __ATOMIC_OPXX(fxn, type, op, reg) \
208 208 extern __GNU_INLINE type \
209 209 fxn(volatile type *target, type val) \
210 210 { \
211 211 __asm__ __volatile__( \
212 212 op " %1,%0" \
213 213 : "+m" (*target), "+" reg (val)); \
214 214 return (val); \
215 215 }
216 216
217 217 __ATOMIC_OPXX(atomic_swap_8, uint8_t, "xchg" SUF_8, "q")
218 218 __ATOMIC_OPXX(atomic_swap_16, uint16_t, "xchg" SUF_16, "r")
219 219 __ATOMIC_OPXX(atomic_swap_32, uint32_t, "xchg" SUF_32, "r")
220 220 __ATOMIC_OP64(atomic_swap_64, uint64_t, "xchg" SUF_64, "r")
221 221 __ATOMIC_OPXX(atomic_swap_uchar, uchar_t, "xchg" SUF_8, "q")
222 222 __ATOMIC_OPXX(atomic_swap_ushort, ushort_t, "xchg" SUF_16, "r")
223 223 __ATOMIC_OPXX(atomic_swap_uint, uint_t, "xchg" SUF_32, "r")
224 224 __ATOMIC_OPXX(atomic_swap_ulong, ulong_t, "xchg" SUF_LONG, "r")
225 225
226 226 #undef __ATOMIC_OPXX
227 227
228 228 /*
229 229 * We don't use the above macro here because atomic_swap_ptr has an
230 230 * inconsistent type. The first argument should really be a 'volatile void
231 231 * **'.
232 232 */
233 233 extern __GNU_INLINE void *
234 234 atomic_swap_ptr(volatile void *target, void *val)
235 235 {
236 236 volatile void **tmp = (volatile void **)target;
237 237
238 238 __asm__ __volatile__(
239 239 "xchg" SUF_PTR " %1,%0"
240 240 : "+m" (*tmp), "+r" (val));
241 241
242 242 return (val);
243 243 }
244 244
245 245 #else
246 246 #error "port me"
247 247 #endif
248 248
249 249 #undef SUF_8
250 250 #undef SUF_16
251 251 #undef SUF_32
252 252 #undef SUF_64
253 253 #undef SUF_LONG
254 254 #undef SUF_PTR
255 255
256 256 #undef __ATOMIC_OP64
257 257
258 258 /* END CSTYLED */
259 259
260 260 #endif /* !__lint && __GNUC__ */
261 261
262 262 #ifdef __cplusplus
263 263 }
264 264 #endif
265 265
266 266 #endif /* _ASM_ATOMIC_H */
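
The substance of this patch is the operand constraint in the second
__ATOMIC_OPXX macro: the old "ir" constraint allowed GCC to place an 8-bit
delta in any general-purpose register, but on i386 only %al/%bl/%cl/%dl have
byte sub-registers, so a choice such as %esi could produce an instruction the
assembler rejects. The new reg parameter is concatenated with "i", giving
"iq" (immediate, or byte-addressable register) for the 8-bit variants and
"ir" for the wider ones. A minimal standalone sketch of the fixed pattern,
assuming GCC inline-asm syntax; the names demo_atomic_add_8, counter, and
main are hypothetical and for illustration only:

#include <stdint.h>
#include <stdio.h>

static inline void
demo_atomic_add_8(volatile uint8_t *target, int8_t delta)
{
	/*
	 * "iq" = an immediate, or a byte-addressable register
	 * (%al/%bl/%cl/%dl on i386); a plain "r" here could pick
	 * %esi/%edi, which have no 8-bit form.
	 */
	__asm__ __volatile__(
	    "lock; addb %1,%0"
	    : "+m" (*target)
	    : "iq" (delta));
}

int
main(void)
{
	volatile uint8_t counter = 250;

	demo_atomic_add_8(&counter, 5);
	(void) printf("%u\n", (unsigned)counter);	/* prints 255 */
	return (0);
}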
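
For callers, the cmpxchg-based functions return the value that was in
*target before the operation: cmpxchg always leaves the original memory
value in %al/%ax/%eax/%rax, which is why cmp is tied to the "=a" output
operand in the macro above. A short usage sketch under that assumption;
try_lock_byte is a hypothetical helper, not part of the header:

/*
 * Attempt to take a byte lock: succeeds iff we observed 0 and
 * installed 1 atomically, since atomic_cas_8() returns the old value.
 */
static int
try_lock_byte(volatile uint8_t *lock)
{
	return (atomic_cas_8(lock, 0, 1) == 0);
}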