22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 .file "atomic.s"
28
29 #include <sys/asm_linkage.h>
30
31 /*
32 * ATOMIC_BO_ENABLE_SHIFT can be selectively defined by processors
33 * to enable exponential backoff. No definition means backoff is
34 * not desired i.e. backoff should be disabled.
35 * By default, the shift value is used to generate a power of 2
36 * value for backoff limit. In the kernel, processors scale this
37 * shift value with the number of online cpus.
38 */
39
40 #if defined(_KERNEL)
41 /*
42 * Legacy kernel interfaces; they will go away (eventually).
43 */
/*
 * Weak aliases from the historical names (cas8, cas32, caslong, swapl, ...)
 * to the current atomic_* entry points, so that old consumers which still
 * reference the legacy symbols continue to link against this file.
 * NOTE(review): ANSI_PRAGMA_WEAK2(old,new,function) presumably emits a weak
 * symbol `old` aliased to `new` — the macro is defined in
 * <sys/asm_linkage.h>, outside this view; confirm there.
 */
44 ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
45 ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
46 ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
47 ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
48 ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
49 ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
50 ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
51 ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
52
53 #ifdef ATOMIC_BO_ENABLE_SHIFT
54
55 #if !defined(lint)
/*
 * Declare cpu_atomic_delay as a weak function symbol: a processor-specific
 * module may supply it to implement the low-impact backoff spin delay
 * described below. If no module defines it, the weak reference resolves to
 * zero — NOTE(review): the call sites (past this view) presumably test the
 * symbol before invoking it; confirm at the point of use.
 */
56 .weak cpu_atomic_delay
57 .type cpu_atomic_delay, #function
58 #endif /* lint */
59
60 /*
61 * For the kernel, invoke processor specific delay routine to perform
62 * low-impact spin delay. The value of ATOMIC_BO_ENABLE_SHIFT is tuned
|
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 .file "atomic.s"
28
29 #include <sys/asm_linkage.h>
30
31 /*
32 * ATOMIC_BO_ENABLE_SHIFT can be selectively defined by processors
33 * to enable exponential backoff. No definition means backoff is
34 * not desired i.e. backoff should be disabled.
35 * By default, the shift value is used to generate a power of 2
36 * value for backoff limit. In the kernel, processors scale this
37 * shift value with the number of online cpus.
38 */
39
40 #if defined(_KERNEL)
41 /*
42 * Legacy kernel interfaces; they will go away the moment our closed
43 * bins no longer require them.
44 */
/*
 * Weak aliases from the historical names (cas8, cas32, caslong, swapl, ...)
 * to the current atomic_* entry points, so that old consumers which still
 * reference the legacy symbols continue to link against this file.
 * NOTE(review): ANSI_PRAGMA_WEAK2(old,new,function) presumably emits a weak
 * symbol `old` aliased to `new` — the macro is defined in
 * <sys/asm_linkage.h>, outside this view; confirm there.
 */
45 ANSI_PRAGMA_WEAK2(cas8,atomic_cas_8,function)
46 ANSI_PRAGMA_WEAK2(cas32,atomic_cas_32,function)
47 ANSI_PRAGMA_WEAK2(cas64,atomic_cas_64,function)
48 ANSI_PRAGMA_WEAK2(caslong,atomic_cas_ulong,function)
49 ANSI_PRAGMA_WEAK2(casptr,atomic_cas_ptr,function)
50 ANSI_PRAGMA_WEAK2(atomic_and_long,atomic_and_ulong,function)
51 ANSI_PRAGMA_WEAK2(atomic_or_long,atomic_or_ulong,function)
52 ANSI_PRAGMA_WEAK2(swapl,atomic_swap_32,function)
53
54 #ifdef ATOMIC_BO_ENABLE_SHIFT
55
56 #if !defined(lint)
/*
 * Declare cpu_atomic_delay as a weak function symbol: a processor-specific
 * module may supply it to implement the low-impact backoff spin delay
 * described below. If no module defines it, the weak reference resolves to
 * zero — NOTE(review): the call sites (past this view) presumably test the
 * symbol before invoking it; confirm at the point of use.
 */
57 .weak cpu_atomic_delay
58 .type cpu_atomic_delay, #function
59 #endif /* lint */
60
61 /*
62 * For the kernel, invoke processor specific delay routine to perform
63 * low-impact spin delay. The value of ATOMIC_BO_ENABLE_SHIFT is tuned
|