5043 remove deprecated atomic functions' prototypes
--- old/usr/src/common/atomic/sparcv9/atomic.s
+++ new/usr/src/common/atomic/sparcv9/atomic.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 .file "atomic.s"
28 28
29 29 #include <sys/asm_linkage.h>
30 30
31 31 /*
32 32 * ATOMIC_BO_ENABLE_SHIFT can be selectively defined by processors
33 33 * to enable exponential backoff. No definition means backoff is
34 34 * not desired, i.e., backoff should be disabled.
35 35 * By default, the shift value is used to generate a power of 2
36 36 * value for backoff limit. In the kernel, processors scale this
37 37 * shift value with the number of online cpus.
38 38 */
39 39
40 40 #if defined(_KERNEL)
41 41 /*
42 - * Legacy kernel interfaces; they will go away (eventually).
42 + * Legacy kernel interfaces; they will go away the moment our closed
43 + * bins no longer require them.
43 44 */
44 45 ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
45 46 ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
46 47 ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
47 48 ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
48 49 ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
49 50 ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
50 51 ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
51 52 ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
52 53
53 54 #ifdef ATOMIC_BO_ENABLE_SHIFT
54 55
55 56 #if !defined(lint)
56 57 .weak cpu_atomic_delay
57 58 .type cpu_atomic_delay, #function
58 59 #endif /* lint */
59 60
60 61 /*
61 62 * For the kernel, invoke processor specific delay routine to perform
62 63 * low-impact spin delay. The value of ATOMIC_BO_ENABLE_SHIFT is tuned
63 64 * with respect to the specific spin delay implementation.
64 65 */
65 66 #define DELAY_SPIN(label, tmp1, tmp2) \
66 67 /* ; \
67 68 * Define a pragma weak reference to a cpu specific ; \
68 69 * delay routine for atomic backoff. For CPUs that ; \
69 70 * have no such delay routine defined, the delay becomes ; \
70 71 * just a simple tight loop. ; \
71 72 * ; \
72 73 * tmp1 = holds CPU specific delay routine ; \
73 74 * tmp2 = holds atomic routine's callee return address ; \
74 75 */ ; \
75 76 sethi %hi(cpu_atomic_delay), tmp1 ; \
76 77 or tmp1, %lo(cpu_atomic_delay), tmp1 ; \
77 78 label/**/0: ; \
78 79 brz,pn tmp1, label/**/1 ; \
79 80 mov %o7, tmp2 ; \
80 81 jmpl tmp1, %o7 /* call CPU specific delay routine */ ; \
81 82 nop /* delay slot : do nothing */ ; \
82 83 mov tmp2, %o7 /* restore callee's return address */ ; \
83 84 label/**/1:
84 85
85 86 /*
86 87 * For the kernel, we take cas failures into consideration
87 88 * and also scale the backoff limit w.r.t. the number of cpus.
88 89 * For cas failures, we reset the backoff value to 1 if the cas
89 90 * failures equal or exceed the number of online cpus. This
90 91 * will enforce some degree of fairness and prevent starvation.
91 92 * We also scale/normalize the processor-provided
92 93 * ATOMIC_BO_ENABLE_SHIFT w.r.t. the number of online cpus to
93 94 * obtain the actual final limit to use.
94 95 */
95 96 #define ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label) \
96 97 brnz,pt ncpu, label/**/0 ; \
97 98 inc cas_cnt ; \
98 99 sethi %hi(ncpus_online), ncpu ; \
99 100 ld [ncpu + %lo(ncpus_online)], ncpu ; \
100 101 label/**/0: ; \
101 102 cmp cas_cnt, ncpu ; \
102 103 blu,pt %xcc, label/**/1 ; \
103 104 sllx ncpu, ATOMIC_BO_ENABLE_SHIFT, limit ; \
104 105 mov %g0, cas_cnt ; \
105 106 mov 1, val ; \
106 107 label/**/1:
107 108 #endif /* ATOMIC_BO_ENABLE_SHIFT */
108 109
109 110 #else /* _KERNEL */
110 111
111 112 /*
112 113 * ATOMIC_BO_ENABLE_SHIFT may be enabled/defined here for generic
113 114 * libc atomics. None for now.
114 115 */
115 116 #ifdef ATOMIC_BO_ENABLE_SHIFT
116 117 #define DELAY_SPIN(label, tmp1, tmp2) \
117 118 label/**/0:
118 119
119 120 #define ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label) \
120 121 set 1 << ATOMIC_BO_ENABLE_SHIFT, limit
121 122 #endif /* ATOMIC_BO_ENABLE_SHIFT */
122 123 #endif /* _KERNEL */
123 124
124 125 #ifdef ATOMIC_BO_ENABLE_SHIFT
125 126 /*
126 127 * ATOMIC_BACKOFF_INIT macro for initialization.
127 128 * backoff val is initialized to 1.
128 129 * ncpu is initialized to 0.
129 130 * The cas_cnt counts cas instruction failures and is
130 131 * initialized to 0.
131 132 */
132 133 #define ATOMIC_BACKOFF_INIT(val, ncpu, cas_cnt) \
133 134 mov 1, val ; \
134 135 mov %g0, ncpu ; \
135 136 mov %g0, cas_cnt
136 137
137 138 #define ATOMIC_BACKOFF_BRANCH(cr, backoff, loop) \
138 139 bne,a,pn cr, backoff
139 140
140 141 /*
141 142 * Main ATOMIC_BACKOFF_BACKOFF macro for backoff.
142 143 */
143 144 #define ATOMIC_BACKOFF_BACKOFF(val, limit, ncpu, cas_cnt, label, retlabel) \
144 145 ATOMIC_BACKOFF_CPU(val, limit, ncpu, cas_cnt, label/**/_0) ; \
145 146 cmp val, limit ; \
146 147 blu,a,pt %xcc, label/**/_1 ; \
147 148 mov val, limit ; \
148 149 label/**/_1: ; \
149 150 mov limit, val ; \
150 151 DELAY_SPIN(label/**/_2, %g2, %g3) ; \
151 152 deccc limit ; \
152 153 bgu,pn %xcc, label/**/_20 /* branch to middle of DELAY_SPIN */ ; \
153 154 nop ; \
154 155 ba retlabel ; \
155 156 sllx val, 1, val
156 157
157 158 #else /* ATOMIC_BO_ENABLE_SHIFT */
158 159 #define ATOMIC_BACKOFF_INIT(val, ncpu, cas_cnt)
159 160
160 161 #define ATOMIC_BACKOFF_BRANCH(cr, backoff, loop) \
161 162 bne,a,pn cr, loop
162 163
163 164 #define ATOMIC_BACKOFF_BACKOFF(val, limit, ncpu, cas_cnt, label, retlabel)
164 165 #endif /* ATOMIC_BO_ENABLE_SHIFT */
165 166
166 167 /*
167 168 * NOTE: If atomic_inc_8 and atomic_inc_8_nv are ever
168 169 * separated, you need to also edit the libc sparcv9 platform
169 170 * specific mapfile and remove the NODYNSORT attribute
170 171 * from atomic_inc_8_nv.
171 172 */
172 173 ENTRY(atomic_inc_8)
173 174 ALTENTRY(atomic_inc_8_nv)
174 175 ALTENTRY(atomic_inc_uchar)
175 176 ALTENTRY(atomic_inc_uchar_nv)
176 177 ba add_8
177 178 add %g0, 1, %o1
178 179 SET_SIZE(atomic_inc_uchar_nv)
179 180 SET_SIZE(atomic_inc_uchar)
180 181 SET_SIZE(atomic_inc_8_nv)
181 182 SET_SIZE(atomic_inc_8)
182 183
183 184 /*
184 185 * NOTE: If atomic_dec_8 and atomic_dec_8_nv are ever
185 186 * separated, you need to also edit the libc sparcv9 platform
186 187 * specific mapfile and remove the NODYNSORT attribute
187 188 * from atomic_dec_8_nv.
188 189 */
189 190 ENTRY(atomic_dec_8)
190 191 ALTENTRY(atomic_dec_8_nv)
191 192 ALTENTRY(atomic_dec_uchar)
192 193 ALTENTRY(atomic_dec_uchar_nv)
193 194 ba add_8
194 195 sub %g0, 1, %o1
195 196 SET_SIZE(atomic_dec_uchar_nv)
196 197 SET_SIZE(atomic_dec_uchar)
197 198 SET_SIZE(atomic_dec_8_nv)
198 199 SET_SIZE(atomic_dec_8)
199 200
200 201 /*
201 202 * NOTE: If atomic_add_8 and atomic_add_8_nv are ever
202 203 * separated, you need to also edit the libc sparcv9 platform
203 204 * specific mapfile and remove the NODYNSORT attribute
204 205 * from atomic_add_8_nv.
205 206 */
206 207 ENTRY(atomic_add_8)
207 208 ALTENTRY(atomic_add_8_nv)
208 209 ALTENTRY(atomic_add_char)
209 210 ALTENTRY(atomic_add_char_nv)
210 211 add_8:
211 212 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
212 213 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
213 214 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
214 215 set 0xff, %o3 ! %o3 = mask
215 216 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
216 217 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
217 218 and %o1, %o3, %o1 ! %o1 = single byte value
218 219 andn %o0, 0x3, %o0 ! %o0 = word address
219 220 ld [%o0], %o2 ! read old value
220 221 1:
221 222 add %o2, %o1, %o5 ! add value to the old value
222 223 and %o5, %o3, %o5 ! clear other bits
223 224 andn %o2, %o3, %o4 ! clear target bits
224 225 or %o4, %o5, %o5 ! insert the new value
225 226 cas [%o0], %o2, %o5
226 227 cmp %o2, %o5
227 228 bne,a,pn %icc, 1b
228 229 mov %o5, %o2 ! %o2 = old value
229 230 add %o2, %o1, %o5
230 231 and %o5, %o3, %o5
231 232 retl
232 233 srl %o5, %g1, %o0 ! %o0 = new value
233 234 SET_SIZE(atomic_add_char_nv)
234 235 SET_SIZE(atomic_add_char)
235 236 SET_SIZE(atomic_add_8_nv)
236 237 SET_SIZE(atomic_add_8)
237 238
238 239 /*
239 240 * NOTE: If atomic_inc_16 and atomic_inc_16_nv are ever
240 241 * separated, you need to also edit the libc sparcv9 platform
241 242 * specific mapfile and remove the NODYNSORT attribute
242 243 * from atomic_inc_16_nv.
243 244 */
244 245 ENTRY(atomic_inc_16)
245 246 ALTENTRY(atomic_inc_16_nv)
246 247 ALTENTRY(atomic_inc_ushort)
247 248 ALTENTRY(atomic_inc_ushort_nv)
248 249 ba add_16
249 250 add %g0, 1, %o1
250 251 SET_SIZE(atomic_inc_ushort_nv)
251 252 SET_SIZE(atomic_inc_ushort)
252 253 SET_SIZE(atomic_inc_16_nv)
253 254 SET_SIZE(atomic_inc_16)
254 255
255 256 /*
256 257 * NOTE: If atomic_dec_16 and atomic_dec_16_nv are ever
257 258 * separated, you need to also edit the libc sparcv9 platform
258 259 * specific mapfile and remove the NODYNSORT attribute
259 260 * from atomic_dec_16_nv.
260 261 */
261 262 ENTRY(atomic_dec_16)
262 263 ALTENTRY(atomic_dec_16_nv)
263 264 ALTENTRY(atomic_dec_ushort)
264 265 ALTENTRY(atomic_dec_ushort_nv)
265 266 ba add_16
266 267 sub %g0, 1, %o1
267 268 SET_SIZE(atomic_dec_ushort_nv)
268 269 SET_SIZE(atomic_dec_ushort)
269 270 SET_SIZE(atomic_dec_16_nv)
270 271 SET_SIZE(atomic_dec_16)
271 272
272 273 /*
273 274 * NOTE: If atomic_add_16 and atomic_add_16_nv are ever
274 275 * separated, you need to also edit the libc sparcv9 platform
275 276 * specific mapfile and remove the NODYNSORT attribute
276 277 * from atomic_add_16_nv.
277 278 */
278 279 ENTRY(atomic_add_16)
279 280 ALTENTRY(atomic_add_16_nv)
280 281 ALTENTRY(atomic_add_short)
281 282 ALTENTRY(atomic_add_short_nv)
282 283 add_16:
283 284 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
284 285 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
285 286 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
286 287 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
287 288 sethi %hi(0xffff0000), %o3 ! %o3 = mask
288 289 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
289 290 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
290 291 and %o1, %o3, %o1 ! %o1 = single short value
291 292 andn %o0, 0x2, %o0 ! %o0 = word address
292 293 ! if low-order bit is 1, we will properly get an alignment fault here
293 294 ld [%o0], %o2 ! read old value
294 295 1:
295 296 add %o1, %o2, %o5 ! add value to the old value
296 297 and %o5, %o3, %o5 ! clear other bits
297 298 andn %o2, %o3, %o4 ! clear target bits
298 299 or %o4, %o5, %o5 ! insert the new value
299 300 cas [%o0], %o2, %o5
300 301 cmp %o2, %o5
301 302 bne,a,pn %icc, 1b
302 303 mov %o5, %o2 ! %o2 = old value
303 304 add %o1, %o2, %o5
304 305 and %o5, %o3, %o5
305 306 retl
306 307 srl %o5, %g1, %o0 ! %o0 = new value
307 308 SET_SIZE(atomic_add_short_nv)
308 309 SET_SIZE(atomic_add_short)
309 310 SET_SIZE(atomic_add_16_nv)
310 311 SET_SIZE(atomic_add_16)
311 312
312 313 /*
313 314 * NOTE: If atomic_inc_32 and atomic_inc_32_nv are ever
314 315 * separated, you need to also edit the libc sparcv9 platform
315 316 * specific mapfile and remove the NODYNSORT attribute
316 317 * from atomic_inc_32_nv.
317 318 */
318 319 ENTRY(atomic_inc_32)
319 320 ALTENTRY(atomic_inc_32_nv)
320 321 ALTENTRY(atomic_inc_uint)
321 322 ALTENTRY(atomic_inc_uint_nv)
322 323 ba add_32
323 324 add %g0, 1, %o1
324 325 SET_SIZE(atomic_inc_uint_nv)
325 326 SET_SIZE(atomic_inc_uint)
326 327 SET_SIZE(atomic_inc_32_nv)
327 328 SET_SIZE(atomic_inc_32)
328 329
329 330 /*
330 331 * NOTE: If atomic_dec_32 and atomic_dec_32_nv are ever
331 332 * separated, you need to also edit the libc sparcv9 platform
332 333 * specific mapfile and remove the NODYNSORT attribute
333 334 * from atomic_dec_32_nv.
334 335 */
335 336 ENTRY(atomic_dec_32)
336 337 ALTENTRY(atomic_dec_32_nv)
337 338 ALTENTRY(atomic_dec_uint)
338 339 ALTENTRY(atomic_dec_uint_nv)
339 340 ba add_32
340 341 sub %g0, 1, %o1
341 342 SET_SIZE(atomic_dec_uint_nv)
342 343 SET_SIZE(atomic_dec_uint)
343 344 SET_SIZE(atomic_dec_32_nv)
344 345 SET_SIZE(atomic_dec_32)
345 346
346 347 /*
347 348 * NOTE: If atomic_add_32 and atomic_add_32_nv are ever
348 349 * separated, you need to also edit the libc sparcv9 platform
349 350 * specific mapfile and remove the NODYNSORT attribute
350 351 * from atomic_add_32_nv.
351 352 */
352 353 ENTRY(atomic_add_32)
353 354 ALTENTRY(atomic_add_32_nv)
354 355 ALTENTRY(atomic_add_int)
355 356 ALTENTRY(atomic_add_int_nv)
356 357 add_32:
357 358 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
358 359 0:
359 360 ld [%o0], %o2
360 361 1:
361 362 add %o2, %o1, %o3
362 363 cas [%o0], %o2, %o3
363 364 cmp %o2, %o3
364 365 ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
365 366 mov %o3, %o2
366 367 retl
367 368 add %o2, %o1, %o0 ! return new value
368 369 2:
369 370 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, add32, 0b)
370 371 SET_SIZE(atomic_add_int_nv)
371 372 SET_SIZE(atomic_add_int)
372 373 SET_SIZE(atomic_add_32_nv)
373 374 SET_SIZE(atomic_add_32)
374 375
375 376 /*
376 377 * NOTE: If atomic_inc_64 and atomic_inc_64_nv are ever
377 378 * separated, you need to also edit the libc sparcv9 platform
378 379 * specific mapfile and remove the NODYNSORT attribute
379 380 * from atomic_inc_64_nv.
380 381 */
381 382 ENTRY(atomic_inc_64)
382 383 ALTENTRY(atomic_inc_64_nv)
383 384 ALTENTRY(atomic_inc_ulong)
384 385 ALTENTRY(atomic_inc_ulong_nv)
385 386 ba add_64
386 387 add %g0, 1, %o1
387 388 SET_SIZE(atomic_inc_ulong_nv)
388 389 SET_SIZE(atomic_inc_ulong)
389 390 SET_SIZE(atomic_inc_64_nv)
390 391 SET_SIZE(atomic_inc_64)
391 392
392 393 /*
393 394 * NOTE: If atomic_dec_64 and atomic_dec_64_nv are ever
394 395 * separated, you need to also edit the libc sparcv9 platform
395 396 * specific mapfile and remove the NODYNSORT attribute
396 397 * from atomic_dec_64_nv.
397 398 */
398 399 ENTRY(atomic_dec_64)
399 400 ALTENTRY(atomic_dec_64_nv)
400 401 ALTENTRY(atomic_dec_ulong)
401 402 ALTENTRY(atomic_dec_ulong_nv)
402 403 ba add_64
403 404 sub %g0, 1, %o1
404 405 SET_SIZE(atomic_dec_ulong_nv)
405 406 SET_SIZE(atomic_dec_ulong)
406 407 SET_SIZE(atomic_dec_64_nv)
407 408 SET_SIZE(atomic_dec_64)
408 409
409 410 /*
410 411 * NOTE: If atomic_add_64 and atomic_add_64_nv are ever
411 412 * separated, you need to also edit the libc sparcv9 platform
412 413 * specific mapfile and remove the NODYNSORT attribute
413 414 * from atomic_add_64_nv.
414 415 */
415 416 ENTRY(atomic_add_64)
416 417 ALTENTRY(atomic_add_64_nv)
417 418 ALTENTRY(atomic_add_ptr)
418 419 ALTENTRY(atomic_add_ptr_nv)
419 420 ALTENTRY(atomic_add_long)
420 421 ALTENTRY(atomic_add_long_nv)
421 422 add_64:
422 423 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
423 424 0:
424 425 ldx [%o0], %o2
425 426 1:
426 427 add %o2, %o1, %o3
427 428 casx [%o0], %o2, %o3
428 429 cmp %o2, %o3
429 430 ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
430 431 mov %o3, %o2
431 432 retl
432 433 add %o2, %o1, %o0 ! return new value
433 434 2:
434 435 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, add64, 0b)
435 436 SET_SIZE(atomic_add_long_nv)
436 437 SET_SIZE(atomic_add_long)
437 438 SET_SIZE(atomic_add_ptr_nv)
438 439 SET_SIZE(atomic_add_ptr)
439 440 SET_SIZE(atomic_add_64_nv)
440 441 SET_SIZE(atomic_add_64)
441 442
442 443 /*
443 444 * NOTE: If atomic_or_8 and atomic_or_8_nv are ever
444 445 * separated, you need to also edit the libc sparcv9 platform
445 446 * specific mapfile and remove the NODYNSORT attribute
446 447 * from atomic_or_8_nv.
447 448 */
448 449 ENTRY(atomic_or_8)
449 450 ALTENTRY(atomic_or_8_nv)
450 451 ALTENTRY(atomic_or_uchar)
451 452 ALTENTRY(atomic_or_uchar_nv)
452 453 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
453 454 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
454 455 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
455 456 set 0xff, %o3 ! %o3 = mask
456 457 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
457 458 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
458 459 and %o1, %o3, %o1 ! %o1 = single byte value
459 460 andn %o0, 0x3, %o0 ! %o0 = word address
460 461 ld [%o0], %o2 ! read old value
461 462 1:
462 463 or %o2, %o1, %o5 ! or in the new value
463 464 cas [%o0], %o2, %o5
464 465 cmp %o2, %o5
465 466 bne,a,pn %icc, 1b
466 467 mov %o5, %o2 ! %o2 = old value
467 468 or %o2, %o1, %o5
468 469 and %o5, %o3, %o5
469 470 retl
470 471 srl %o5, %g1, %o0 ! %o0 = new value
471 472 SET_SIZE(atomic_or_uchar_nv)
472 473 SET_SIZE(atomic_or_uchar)
473 474 SET_SIZE(atomic_or_8_nv)
474 475 SET_SIZE(atomic_or_8)
475 476
476 477 /*
477 478 * NOTE: If atomic_or_16 and atomic_or_16_nv are ever
478 479 * separated, you need to also edit the libc sparcv9 platform
479 480 * specific mapfile and remove the NODYNSORT attribute
480 481 * from atomic_or_16_nv.
481 482 */
482 483 ENTRY(atomic_or_16)
483 484 ALTENTRY(atomic_or_16_nv)
484 485 ALTENTRY(atomic_or_ushort)
485 486 ALTENTRY(atomic_or_ushort_nv)
486 487 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
487 488 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
488 489 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
489 490 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
490 491 sethi %hi(0xffff0000), %o3 ! %o3 = mask
491 492 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
492 493 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
493 494 and %o1, %o3, %o1 ! %o1 = single short value
494 495 andn %o0, 0x2, %o0 ! %o0 = word address
495 496 ! if low-order bit is 1, we will properly get an alignment fault here
496 497 ld [%o0], %o2 ! read old value
497 498 1:
498 499 or %o2, %o1, %o5 ! or in the new value
499 500 cas [%o0], %o2, %o5
500 501 cmp %o2, %o5
501 502 bne,a,pn %icc, 1b
502 503 mov %o5, %o2 ! %o2 = old value
503 504 or %o2, %o1, %o5 ! or in the new value
504 505 and %o5, %o3, %o5
505 506 retl
506 507 srl %o5, %g1, %o0 ! %o0 = new value
507 508 SET_SIZE(atomic_or_ushort_nv)
508 509 SET_SIZE(atomic_or_ushort)
509 510 SET_SIZE(atomic_or_16_nv)
510 511 SET_SIZE(atomic_or_16)
511 512
512 513 /*
513 514 * NOTE: If atomic_or_32 and atomic_or_32_nv are ever
514 515 * separated, you need to also edit the libc sparcv9 platform
515 516 * specific mapfile and remove the NODYNSORT attribute
516 517 * from atomic_or_32_nv.
517 518 */
518 519 ENTRY(atomic_or_32)
519 520 ALTENTRY(atomic_or_32_nv)
520 521 ALTENTRY(atomic_or_uint)
521 522 ALTENTRY(atomic_or_uint_nv)
522 523 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
523 524 0:
524 525 ld [%o0], %o2
525 526 1:
526 527 or %o2, %o1, %o3
527 528 cas [%o0], %o2, %o3
528 529 cmp %o2, %o3
529 530 ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
530 531 mov %o3, %o2
531 532 retl
532 533 or %o2, %o1, %o0 ! return new value
533 534 2:
534 535 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, or32, 0b)
535 536 SET_SIZE(atomic_or_uint_nv)
536 537 SET_SIZE(atomic_or_uint)
537 538 SET_SIZE(atomic_or_32_nv)
538 539 SET_SIZE(atomic_or_32)
539 540
540 541 /*
541 542 * NOTE: If atomic_or_64 and atomic_or_64_nv are ever
542 543 * separated, you need to also edit the libc sparcv9 platform
543 544 * specific mapfile and remove the NODYNSORT attribute
544 545 * from atomic_or_64_nv.
545 546 */
546 547 ENTRY(atomic_or_64)
547 548 ALTENTRY(atomic_or_64_nv)
548 549 ALTENTRY(atomic_or_ulong)
549 550 ALTENTRY(atomic_or_ulong_nv)
550 551 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
551 552 0:
552 553 ldx [%o0], %o2
553 554 1:
554 555 or %o2, %o1, %o3
555 556 casx [%o0], %o2, %o3
556 557 cmp %o2, %o3
557 558 ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
558 559 mov %o3, %o2
559 560 retl
560 561 or %o2, %o1, %o0 ! return new value
561 562 2:
562 563 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, or64, 0b)
563 564 SET_SIZE(atomic_or_ulong_nv)
564 565 SET_SIZE(atomic_or_ulong)
565 566 SET_SIZE(atomic_or_64_nv)
566 567 SET_SIZE(atomic_or_64)
567 568
568 569 /*
569 570 * NOTE: If atomic_and_8 and atomic_and_8_nv are ever
570 571 * separated, you need to also edit the libc sparcv9 platform
571 572 * specific mapfile and remove the NODYNSORT attribute
572 573 * from atomic_and_8_nv.
573 574 */
574 575 ENTRY(atomic_and_8)
575 576 ALTENTRY(atomic_and_8_nv)
576 577 ALTENTRY(atomic_and_uchar)
577 578 ALTENTRY(atomic_and_uchar_nv)
578 579 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
579 580 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
580 581 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
581 582 set 0xff, %o3 ! %o3 = mask
582 583 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
583 584 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
584 585 orn %o1, %o3, %o1 ! all ones in other bytes
585 586 andn %o0, 0x3, %o0 ! %o0 = word address
586 587 ld [%o0], %o2 ! read old value
587 588 1:
588 589 and %o2, %o1, %o5 ! and in the new value
589 590 cas [%o0], %o2, %o5
590 591 cmp %o2, %o5
591 592 bne,a,pn %icc, 1b
592 593 mov %o5, %o2 ! %o2 = old value
593 594 and %o2, %o1, %o5
594 595 and %o5, %o3, %o5
595 596 retl
596 597 srl %o5, %g1, %o0 ! %o0 = new value
597 598 SET_SIZE(atomic_and_uchar_nv)
598 599 SET_SIZE(atomic_and_uchar)
599 600 SET_SIZE(atomic_and_8_nv)
600 601 SET_SIZE(atomic_and_8)
601 602
602 603 /*
603 604 * NOTE: If atomic_and_16 and atomic_and_16_nv are ever
604 605 * separated, you need to also edit the libc sparcv9 platform
605 606 * specific mapfile and remove the NODYNSORT attribute
606 607 * from atomic_and_16_nv.
607 608 */
608 609 ENTRY(atomic_and_16)
609 610 ALTENTRY(atomic_and_16_nv)
610 611 ALTENTRY(atomic_and_ushort)
611 612 ALTENTRY(atomic_and_ushort_nv)
612 613 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
613 614 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
614 615 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
615 616 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
616 617 sethi %hi(0xffff0000), %o3 ! %o3 = mask
617 618 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
618 619 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
619 620 orn %o1, %o3, %o1 ! all ones in the other half
620 621 andn %o0, 0x2, %o0 ! %o0 = word address
621 622 ! if low-order bit is 1, we will properly get an alignment fault here
622 623 ld [%o0], %o2 ! read old value
623 624 1:
624 625 and %o2, %o1, %o5 ! and in the new value
625 626 cas [%o0], %o2, %o5
626 627 cmp %o2, %o5
627 628 bne,a,pn %icc, 1b
628 629 mov %o5, %o2 ! %o2 = old value
629 630 and %o2, %o1, %o5
630 631 and %o5, %o3, %o5
631 632 retl
632 633 srl %o5, %g1, %o0 ! %o0 = new value
633 634 SET_SIZE(atomic_and_ushort_nv)
634 635 SET_SIZE(atomic_and_ushort)
635 636 SET_SIZE(atomic_and_16_nv)
636 637 SET_SIZE(atomic_and_16)
637 638
638 639 /*
639 640 * NOTE: If atomic_and_32 and atomic_and_32_nv are ever
640 641 * separated, you need to also edit the libc sparcv9 platform
641 642 * specific mapfile and remove the NODYNSORT attribute
642 643 * from atomic_and_32_nv.
643 644 */
644 645 ENTRY(atomic_and_32)
645 646 ALTENTRY(atomic_and_32_nv)
646 647 ALTENTRY(atomic_and_uint)
647 648 ALTENTRY(atomic_and_uint_nv)
648 649 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
649 650 0:
650 651 ld [%o0], %o2
651 652 1:
652 653 and %o2, %o1, %o3
653 654 cas [%o0], %o2, %o3
654 655 cmp %o2, %o3
655 656 ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
656 657 mov %o3, %o2
657 658 retl
658 659 and %o2, %o1, %o0 ! return new value
659 660 2:
660 661 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, and32, 0b)
661 662 SET_SIZE(atomic_and_uint_nv)
662 663 SET_SIZE(atomic_and_uint)
663 664 SET_SIZE(atomic_and_32_nv)
664 665 SET_SIZE(atomic_and_32)
665 666
666 667 /*
667 668 * NOTE: If atomic_and_64 and atomic_and_64_nv are ever
668 669 * separated, you need to also edit the libc sparcv9 platform
669 670 * specific mapfile and remove the NODYNSORT attribute
670 671 * from atomic_and_64_nv.
671 672 */
672 673 ENTRY(atomic_and_64)
673 674 ALTENTRY(atomic_and_64_nv)
674 675 ALTENTRY(atomic_and_ulong)
675 676 ALTENTRY(atomic_and_ulong_nv)
676 677 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
677 678 0:
678 679 ldx [%o0], %o2
679 680 1:
680 681 and %o2, %o1, %o3
681 682 casx [%o0], %o2, %o3
682 683 cmp %o2, %o3
683 684 ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
684 685 mov %o3, %o2
685 686 retl
686 687 and %o2, %o1, %o0 ! return new value
687 688 2:
688 689 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, and64, 0b)
689 690 SET_SIZE(atomic_and_ulong_nv)
690 691 SET_SIZE(atomic_and_ulong)
691 692 SET_SIZE(atomic_and_64_nv)
692 693 SET_SIZE(atomic_and_64)
693 694
694 695 ENTRY(atomic_cas_8)
695 696 ALTENTRY(atomic_cas_uchar)
696 697 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
697 698 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
698 699 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
699 700 set 0xff, %o3 ! %o3 = mask
700 701 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
701 702 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
702 703 and %o1, %o3, %o1 ! %o1 = single byte value
703 704 sll %o2, %g1, %o2 ! %o2 = shifted to bit offset
704 705 and %o2, %o3, %o2 ! %o2 = single byte value
705 706 andn %o0, 0x3, %o0 ! %o0 = word address
706 707 ld [%o0], %o4 ! read old value
707 708 1:
708 709 andn %o4, %o3, %o4 ! clear target bits
709 710 or %o4, %o2, %o5 ! insert the new value
710 711 or %o4, %o1, %o4 ! insert the comparison value
711 712 cas [%o0], %o4, %o5
712 713 cmp %o4, %o5 ! did we succeed?
713 714 be,pt %icc, 2f
714 715 and %o5, %o3, %o4 ! isolate the old value
715 716 cmp %o1, %o4 ! should we have succeeded?
716 717 be,a,pt %icc, 1b ! yes, try again
717 718 mov %o5, %o4 ! %o4 = old value
718 719 2:
719 720 retl
720 721 srl %o4, %g1, %o0 ! %o0 = old value
721 722 SET_SIZE(atomic_cas_uchar)
722 723 SET_SIZE(atomic_cas_8)
723 724
724 725 ENTRY(atomic_cas_16)
725 726 ALTENTRY(atomic_cas_ushort)
726 727 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
727 728 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
728 729 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
729 730 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
730 731 sethi %hi(0xffff0000), %o3 ! %o3 = mask
731 732 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
732 733 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
733 734 and %o1, %o3, %o1 ! %o1 = single short value
734 735 sll %o2, %g1, %o2 ! %o2 = shifted to bit offset
735 736 and %o2, %o3, %o2 ! %o2 = single short value
736 737 andn %o0, 0x2, %o0 ! %o0 = word address
737 738 ! if low-order bit is 1, we will properly get an alignment fault here
738 739 ld [%o0], %o4 ! read old value
739 740 1:
740 741 andn %o4, %o3, %o4 ! clear target bits
741 742 or %o4, %o2, %o5 ! insert the new value
742 743 or %o4, %o1, %o4 ! insert the comparison value
743 744 cas [%o0], %o4, %o5
744 745 cmp %o4, %o5 ! did we succeed?
745 746 be,pt %icc, 2f
746 747 and %o5, %o3, %o4 ! isolate the old value
747 748 cmp %o1, %o4 ! should we have succeeded?
748 749 be,a,pt %icc, 1b ! yes, try again
749 750 mov %o5, %o4 ! %o4 = old value
750 751 2:
751 752 retl
752 753 srl %o4, %g1, %o0 ! %o0 = old value
753 754 SET_SIZE(atomic_cas_ushort)
754 755 SET_SIZE(atomic_cas_16)
755 756
756 757 ENTRY(atomic_cas_32)
757 758 ALTENTRY(atomic_cas_uint)
758 759 cas [%o0], %o1, %o2
759 760 retl
760 761 mov %o2, %o0
761 762 SET_SIZE(atomic_cas_uint)
762 763 SET_SIZE(atomic_cas_32)
763 764
764 765 ENTRY(atomic_cas_64)
765 766 ALTENTRY(atomic_cas_ptr)
766 767 ALTENTRY(atomic_cas_ulong)
767 768 casx [%o0], %o1, %o2
768 769 retl
769 770 mov %o2, %o0
770 771 SET_SIZE(atomic_cas_ulong)
771 772 SET_SIZE(atomic_cas_ptr)
772 773 SET_SIZE(atomic_cas_64)
773 774
774 775 ENTRY(atomic_swap_8)
775 776 ALTENTRY(atomic_swap_uchar)
776 777 and %o0, 0x3, %o4 ! %o4 = byte offset, left-to-right
777 778 xor %o4, 0x3, %g1 ! %g1 = byte offset, right-to-left
778 779 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
779 780 set 0xff, %o3 ! %o3 = mask
780 781 sll %o3, %g1, %o3 ! %o3 = shifted to bit offset
781 782 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
782 783 and %o1, %o3, %o1 ! %o1 = single byte value
783 784 andn %o0, 0x3, %o0 ! %o0 = word address
784 785 ld [%o0], %o2 ! read old value
785 786 1:
786 787 andn %o2, %o3, %o5 ! clear target bits
787 788 or %o5, %o1, %o5 ! insert the new value
788 789 cas [%o0], %o2, %o5
789 790 cmp %o2, %o5
790 791 bne,a,pn %icc, 1b
791 792 mov %o5, %o2 ! %o2 = old value
792 793 and %o5, %o3, %o5
793 794 retl
794 795 srl %o5, %g1, %o0 ! %o0 = old value
795 796 SET_SIZE(atomic_swap_uchar)
796 797 SET_SIZE(atomic_swap_8)
797 798
798 799 ENTRY(atomic_swap_16)
799 800 ALTENTRY(atomic_swap_ushort)
800 801 and %o0, 0x2, %o4 ! %o4 = byte offset, left-to-right
801 802 xor %o4, 0x2, %g1 ! %g1 = byte offset, right-to-left
802 803 sll %o4, 3, %o4 ! %o4 = bit offset, left-to-right
803 804 sll %g1, 3, %g1 ! %g1 = bit offset, right-to-left
804 805 sethi %hi(0xffff0000), %o3 ! %o3 = mask
805 806 srl %o3, %o4, %o3 ! %o3 = shifted to bit offset
806 807 sll %o1, %g1, %o1 ! %o1 = shifted to bit offset
807 808 and %o1, %o3, %o1 ! %o1 = single short value
808 809 andn %o0, 0x2, %o0 ! %o0 = word address
809 810 ! if low-order bit is 1, we will properly get an alignment fault here
810 811 ld [%o0], %o2 ! read old value
811 812 1:
812 813 andn %o2, %o3, %o5 ! clear target bits
813 814 or %o5, %o1, %o5 ! insert the new value
814 815 cas [%o0], %o2, %o5
815 816 cmp %o2, %o5
816 817 bne,a,pn %icc, 1b
817 818 mov %o5, %o2 ! %o2 = old value
818 819 and %o5, %o3, %o5
819 820 retl
820 821 srl %o5, %g1, %o0 ! %o0 = old value
821 822 SET_SIZE(atomic_swap_ushort)
822 823 SET_SIZE(atomic_swap_16)
823 824
824 825 ENTRY(atomic_swap_32)
825 826 ALTENTRY(atomic_swap_uint)
826 827 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
827 828 0:
828 829 ld [%o0], %o2
829 830 1:
830 831 mov %o1, %o3
831 832 cas [%o0], %o2, %o3
832 833 cmp %o2, %o3
833 834 ATOMIC_BACKOFF_BRANCH(%icc, 2f, 1b)
834 835 mov %o3, %o2
835 836 retl
836 837 mov %o3, %o0
837 838 2:
838 839 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, swap32, 0b)
839 840 SET_SIZE(atomic_swap_uint)
840 841 SET_SIZE(atomic_swap_32)
841 842
842 843 ENTRY(atomic_swap_64)
843 844 ALTENTRY(atomic_swap_ptr)
844 845 ALTENTRY(atomic_swap_ulong)
845 846 ATOMIC_BACKOFF_INIT(%o4, %g4, %g5)
846 847 0:
847 848 ldx [%o0], %o2
848 849 1:
849 850 mov %o1, %o3
850 851 casx [%o0], %o2, %o3
851 852 cmp %o2, %o3
852 853 ATOMIC_BACKOFF_BRANCH(%xcc, 2f, 1b)
853 854 mov %o3, %o2
854 855 retl
855 856 mov %o3, %o0
856 857 2:
857 858 ATOMIC_BACKOFF_BACKOFF(%o4, %o5, %g4, %g5, swap64, 0b)
858 859 SET_SIZE(atomic_swap_ulong)
859 860 SET_SIZE(atomic_swap_ptr)
860 861 SET_SIZE(atomic_swap_64)
861 862
862 863 ENTRY(atomic_set_long_excl)
863 864 ATOMIC_BACKOFF_INIT(%o5, %g4, %g5)
864 865 mov 1, %o3
865 866 slln %o3, %o1, %o3
866 867 0:
867 868 ldn [%o0], %o2
868 869 1:
869 870 andcc %o2, %o3, %g0 ! test if the bit is set
870 871 bnz,a,pn %ncc, 2f ! if so, then fail out
871 872 mov -1, %o0
872 873 or %o2, %o3, %o4 ! set the bit, and try to commit it
873 874 casn [%o0], %o2, %o4
874 875 cmp %o2, %o4
875 876 ATOMIC_BACKOFF_BRANCH(%ncc, 5f, 1b)
876 877 mov %o4, %o2
877 878 mov %g0, %o0
878 879 2:
879 880 retl
880 881 nop
881 882 5:
882 883 ATOMIC_BACKOFF_BACKOFF(%o5, %g1, %g4, %g5, setlongexcl, 0b)
883 884 SET_SIZE(atomic_set_long_excl)
884 885
885 886 ENTRY(atomic_clear_long_excl)
886 887 ATOMIC_BACKOFF_INIT(%o5, %g4, %g5)
887 888 mov 1, %o3
888 889 slln %o3, %o1, %o3
889 890 0:
890 891 ldn [%o0], %o2
891 892 1:
892 893 andncc %o3, %o2, %g0 ! test if the bit is clear
893 894 bnz,a,pn %ncc, 2f ! if so, then fail out
894 895 mov -1, %o0
895 896 andn %o2, %o3, %o4 ! clear the bit, and try to commit it
896 897 casn [%o0], %o2, %o4
897 898 cmp %o2, %o4
898 899 ATOMIC_BACKOFF_BRANCH(%ncc, 5f, 1b)
899 900 mov %o4, %o2
900 901 mov %g0, %o0
901 902 2:
902 903 retl
903 904 nop
904 905 5:
905 906 ATOMIC_BACKOFF_BACKOFF(%o5, %g1, %g4, %g5, clrlongexcl, 0b)
906 907 SET_SIZE(atomic_clear_long_excl)
907 908
908 909 #if !defined(_KERNEL)
909 910
910 911 /*
911 912 * Spitfires and Blackbirds have a problem with membars in the
912 913 * delay slot (SF_ERRATA_51). For safety's sake, we assume
913 914 * that the whole world needs the workaround.
914 915 */
915 916 ENTRY(membar_enter)
916 917 membar #StoreLoad|#StoreStore
917 918 retl
918 919 nop
919 920 SET_SIZE(membar_enter)
920 921
921 922 ENTRY(membar_exit)
922 923 membar #LoadStore|#StoreStore
923 924 retl
924 925 nop
925 926 SET_SIZE(membar_exit)
926 927
927 928 ENTRY(membar_producer)
928 929 membar #StoreStore
929 930 retl
930 931 nop
931 932 SET_SIZE(membar_producer)
932 933
933 934 ENTRY(membar_consumer)
934 935 membar #LoadLoad
935 936 retl
936 937 nop
937 938 SET_SIZE(membar_consumer)
938 939
939 940 #endif /* !_KERNEL */
887 lines elided
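
For readers tracing the ATOMIC_BACKOFF_* machinery above, here is a rough C
sketch of the retry policy those macros encode for the word- and
doubleword-sized operations (add_32, or_64, swap_64, and so on) when
ATOMIC_BO_ENABLE_SHIFT is defined in the kernel. It is only a sketch:
sketch_atomic_add_32_nv and ATOMIC_BO_SHIFT_GUESS are made-up names, the GCC
__sync builtin stands in for the cas instruction, and the empty-asm loop
stands in for cpu_atomic_delay; none of these are interfaces defined by this
file.

	#include <stdint.h>

	/* Placeholder for a processor module's ATOMIC_BO_ENABLE_SHIFT value. */
	#define	ATOMIC_BO_SHIFT_GUESS	4

	extern unsigned int ncpus_online;	/* stands in for the kernel global */

	static uint32_t
	sketch_atomic_add_32_nv(volatile uint32_t *target, int32_t delta)
	{
		uint32_t backoff = 1;		/* ATOMIC_BACKOFF_INIT: val = 1 */
		unsigned int cas_failures = 0;	/* cas_cnt = 0 */

		for (;;) {
			uint32_t oldv = *target;
			uint32_t newv = oldv + (uint32_t)delta;

			/* cas [%o0], %o2, %o3: succeeds iff *target still equals oldv */
			if (__sync_bool_compare_and_swap(target, oldv, newv))
				return (newv);

			/*
			 * ATOMIC_BACKOFF_CPU: once the failure count reaches the
			 * number of online cpus, reset the backoff for fairness.
			 */
			if (++cas_failures >= ncpus_online) {
				cas_failures = 0;
				backoff = 1;
			}

			/* The spin count is capped at ncpus_online << shift. */
			uint32_t limit = ncpus_online << ATOMIC_BO_SHIFT_GUESS;
			uint32_t spin = (backoff < limit) ? backoff : limit;

			/* DELAY_SPIN: cpu_atomic_delay if present, else a tight loop. */
			for (uint32_t i = 0; i < spin; i++)
				__asm__ __volatile__("" ::: "memory");

			backoff = spin << 1;	/* sllx val, 1, val */
		}
	}

When ATOMIC_BO_ENABLE_SHIFT is not defined, ATOMIC_BACKOFF_INIT and
ATOMIC_BACKOFF_BACKOFF expand to nothing, ATOMIC_BACKOFF_BRANCH branches
straight back to the cas, and the loop above degenerates to a plain
compare-and-swap retry.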
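
The 8- and 16-bit operations above (add_8, or_8, add_16, cas_16, swap_8, and
so on) have no sub-word cas to lean on, so they operate on the containing
32-bit word: the big-endian byte offset is turned into a shift and mask, and
the update is committed with an ordinary cas. Below is a rough C rendering of
the add_8 path; the function name, the spelled-out big-endian assumption, and
the GCC __sync builtin used in place of the cas instruction are illustrative
stand-ins, not part of this change.

	#include <stdint.h>

	/*
	 * Sketch of the byte-within-word technique used by add_8 above.
	 * Assumes the big-endian layout of SPARC: byte offset 0 is the most
	 * significant byte of the containing word.
	 */
	static uint8_t
	sketch_atomic_add_8_nv(volatile uint8_t *target, int8_t delta)
	{
		uintptr_t addr = (uintptr_t)target;
		volatile uint32_t *word = (volatile uint32_t *)(addr & ~(uintptr_t)0x3);
		unsigned int shift = ((unsigned int)(addr & 0x3) ^ 0x3) * 8;	/* bit offset, right-to-left */
		uint32_t mask = (uint32_t)0xff << shift;	/* isolates the target byte */
		uint32_t add = ((uint32_t)(uint8_t)delta << shift) & mask;
		uint32_t oldw = *word;

		for (;;) {
			/* New word: untouched bytes from oldw, updated target byte. */
			uint32_t neww = (oldw & ~mask) | ((oldw + add) & mask);
			uint32_t seen = __sync_val_compare_and_swap(word, oldw, neww);

			if (seen == oldw)
				return ((uint8_t)(((oldw + add) & mask) >> shift));
			oldw = seen;	/* lost the race; retry against the new contents */
		}
	}

The 16-bit variants follow the same pattern with a halfword shift and a
0xffff mask, and, as the source comments note, they mask only the 0x2 bit off
the address so that a misaligned pointer still takes an alignment fault on
the ld.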