XXXX pass in cpu_pause_func via pause_cpus
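This change extends pause_cpus() to accept a CPU-pause callback. A minimal sketch of the interface change, inferred from the call-site update in the diff below (the exact prototype lives in <sys/cpuvar.h>; the parameter names here are assumptions):

	/* old prototype */
	void pause_cpus(cpu_t *off_cp);

	/* new prototype: func, if non-NULL, is a callback applied during the pause */
	void pause_cpus(cpu_t *off_cp, void *(*func)(void *));

Callers with no callback, such as stop_other_cpus() here, simply pass NULL for the new argument.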
--- old/usr/src/uts/sun4/os/mp_states.c
+++ new/usr/src/uts/sun4/os/mp_states.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/systm.h>
26 26 #include <sys/membar.h>
27 27 #include <sys/machsystm.h>
28 28 #include <sys/x_call.h>
29 29 #include <sys/platform_module.h>
30 30 #include <sys/cpuvar.h>
31 31 #include <sys/cpu_module.h>
32 32 #include <sys/cmp.h>
33 33 #include <sys/dumphdr.h>
34 34
35 35 #include <sys/cpu_sgnblk_defs.h>
36 36
37 37 static cpuset_t cpu_idle_set;
38 38 static kmutex_t cpu_idle_lock;
39 39 typedef const char *fn_t;
40 40
41 41 /*
42 42 * flags to determine if the PROM routines
43 43 * should be used to idle/resume/stop cpus
44 44 */
45 45 static int kern_idle[NCPU]; /* kernel's idle loop */
46 46 static int cpu_are_paused;
47 47 extern void debug_flush_windows();
48 48
49 49 /*
50 50 * Initialize the idlestop mutex
51 51 */
52 52 void
53 53 idlestop_init(void)
54 54 {
55 55 mutex_init(&cpu_idle_lock, NULL, MUTEX_SPIN, (void *)ipltospl(PIL_15));
56 56 }
57 57
58 58 static void
59 59 cpu_idle_self(void)
60 60 {
61 61 uint_t s;
62 62 label_t save;
63 63
64 64 s = spl8();
65 65 debug_flush_windows();
66 66
67 67 CPU->cpu_m.in_prom = 1;
68 68 membar_stld();
69 69
70 70 save = curthread->t_pcb;
71 71 (void) setjmp(&curthread->t_pcb);
72 72
73 73 kern_idle[CPU->cpu_id] = 1;
74 74 while (kern_idle[CPU->cpu_id])
75 75 dumpsys_helper_nw();
76 76
77 77 CPU->cpu_m.in_prom = 0;
78 78 membar_stld();
79 79
80 80 curthread->t_pcb = save;
81 81 splx(s);
82 82 }
83 83
84 84 void
85 85 idle_other_cpus(void)
86 86 {
87 87 int i, cpuid, ntries;
88 88 int failed = 0;
89 89
90 90 if (ncpus == 1)
91 91 return;
92 92
93 93 mutex_enter(&cpu_idle_lock);
94 94
95 95 cpuid = CPU->cpu_id;
96 96 ASSERT(cpuid < NCPU);
97 97
98 98 cpu_idle_set = cpu_ready_set;
99 99 CPUSET_DEL(cpu_idle_set, cpuid);
100 100
101 101 if (CPUSET_ISNULL(cpu_idle_set))
102 102 return;
103 103
104 104 xt_some(cpu_idle_set, (xcfunc_t *)idle_stop_xcall,
105 105 (uint64_t)cpu_idle_self, NULL);
106 106
107 107 for (i = 0; i < NCPU; i++) {
108 108 if (!CPU_IN_SET(cpu_idle_set, i))
109 109 continue;
110 110
111 111 ntries = 0x10000;
112 112 while (!cpu[i]->cpu_m.in_prom && ntries) {
113 113 DELAY(50);
114 114 ntries--;
115 115 }
116 116
117 117 /*
118 118 * A cpu failing to idle is an error condition, since
119 119 * we can't be sure anymore of its state.
120 120 */
121 121 if (!cpu[i]->cpu_m.in_prom) {
122 122 cmn_err(CE_WARN, "cpuid 0x%x failed to idle", i);
123 123 failed++;
124 124 }
125 125 }
126 126
127 127 if (failed) {
128 128 mutex_exit(&cpu_idle_lock);
129 129 cmn_err(CE_PANIC, "idle_other_cpus: not all cpus idled");
130 130 }
131 131 }
132 132
133 133 void
134 134 resume_other_cpus(void)
135 135 {
136 136 int i, ntries;
137 137 int cpuid = CPU->cpu_id;
138 138 boolean_t failed = B_FALSE;
139 139
140 140 if (ncpus == 1)
141 141 return;
142 142
143 143 ASSERT(cpuid < NCPU);
144 144 ASSERT(MUTEX_HELD(&cpu_idle_lock));
145 145
146 146 for (i = 0; i < NCPU; i++) {
147 147 if (!CPU_IN_SET(cpu_idle_set, i))
148 148 continue;
149 149
150 150 kern_idle[i] = 0;
151 151 membar_stld();
152 152 }
153 153
154 154 for (i = 0; i < NCPU; i++) {
155 155 if (!CPU_IN_SET(cpu_idle_set, i))
156 156 continue;
157 157
158 158 ntries = 0x10000;
159 159 while (cpu[i]->cpu_m.in_prom && ntries) {
160 160 DELAY(50);
161 161 ntries--;
162 162 }
163 163
164 164 /*
165 165 * A cpu failing to resume is an error condition, since
166 166 * intrs may have been directed there.
167 167 */
168 168 if (cpu[i]->cpu_m.in_prom) {
169 169 cmn_err(CE_WARN, "cpuid 0x%x failed to resume", i);
170 170 continue;
171 171 }
172 172 CPUSET_DEL(cpu_idle_set, i);
173 173 }
174 174
175 175 failed = !CPUSET_ISNULL(cpu_idle_set);
176 176
177 177 mutex_exit(&cpu_idle_lock);
178 178
179 179 /*
180 180 * Non-zero if a cpu failed to resume
181 181 */
182 182 if (failed)
183 183 cmn_err(CE_PANIC, "resume_other_cpus: not all cpus resumed");
184 184
185 185 }
186 186
187 187 /*
 188 188 * Stop all other CPUs before halting or rebooting. We pause the CPUs
189 189 * instead of sending a cross call.
190 190 */
191 191 void
192 192 stop_other_cpus(void)
193 193 {
194 194 mutex_enter(&cpu_lock);
195 195 if (cpu_are_paused) {
196 196 mutex_exit(&cpu_lock);
197 197 return;
198 198 }
199 199
200 200 if (ncpus > 1)
201 201 intr_redist_all_cpus_shutdown();
202 202
203 - pause_cpus(NULL);
203 + pause_cpus(NULL, NULL);
204 204 cpu_are_paused = 1;
205 205
206 206 mutex_exit(&cpu_lock);
207 207 }
208 208
209 209 int cpu_quiesce_microsecond_sanity_limit = 60 * 1000000;
210 210
211 211 void
212 212 mp_cpu_quiesce(cpu_t *cp0)
213 213 {
214 214
215 215 volatile cpu_t *cp = (volatile cpu_t *) cp0;
216 216 int i, sanity_limit = cpu_quiesce_microsecond_sanity_limit;
217 217 int cpuid = cp->cpu_id;
218 218 int found_intr = 1;
219 219 static fn_t f = "mp_cpu_quiesce";
220 220
221 221 ASSERT(CPU->cpu_id != cpuid);
222 222 ASSERT(MUTEX_HELD(&cpu_lock));
223 223 ASSERT(cp->cpu_flags & CPU_QUIESCED);
224 224
225 225
226 226 /*
227 227 * Declare CPU as no longer being READY to process interrupts and
228 228 * wait for them to stop. A CPU that is not READY can no longer
229 229 * participate in x-calls or x-traps.
230 230 */
231 231 cp->cpu_flags &= ~CPU_READY;
232 232 CPUSET_DEL(cpu_ready_set, cpuid);
233 233 membar_sync();
234 234
235 235 for (i = 0; i < sanity_limit; i++) {
236 236 if (cp->cpu_intr_actv == 0 &&
237 237 (cp->cpu_thread == cp->cpu_idle_thread ||
238 238 cp->cpu_thread == cp->cpu_startup_thread)) {
239 239 found_intr = 0;
240 240 break;
241 241 }
242 242 DELAY(1);
243 243 }
244 244
245 245 if (found_intr) {
246 246
247 247 if (cp->cpu_intr_actv) {
248 248 cmn_err(CE_PANIC, "%s: cpu_intr_actv != 0", f);
249 249 } else if (cp->cpu_thread != cp->cpu_idle_thread &&
250 250 cp->cpu_thread != cp->cpu_startup_thread) {
251 251 cmn_err(CE_PANIC, "%s: CPU %d is not quiesced",
252 252 f, cpuid);
253 253 }
254 254
255 255 }
256 256 }
257 257
258 258 /*
259 259 * Start CPU on user request.
260 260 */
261 261 /* ARGSUSED */
262 262 int
263 263 mp_cpu_start(struct cpu *cp)
264 264 {
265 265 ASSERT(MUTEX_HELD(&cpu_lock));
266 266 /*
267 267 * Platforms that use CPU signatures require the signature
268 268 * block update to indicate that this CPU is in the OS now.
269 269 */
270 270 CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, cp->cpu_id);
271 271
272 272 cmp_error_resteer(cp->cpu_id);
273 273
274 274 return (0); /* nothing special to do on this arch */
275 275 }
276 276
277 277 /*
278 278 * Stop CPU on user request.
279 279 */
280 280 /* ARGSUSED */
281 281 int
282 282 mp_cpu_stop(struct cpu *cp)
283 283 {
284 284 ASSERT(MUTEX_HELD(&cpu_lock));
285 285
286 286 cmp_error_resteer(cp->cpu_id);
287 287
288 288 /*
289 289 * Platforms that use CPU signatures require the signature
290 290 * block update to indicate that this CPU is offlined now.
291 291 */
292 292 CPU_SIGNATURE(OS_SIG, SIGST_OFFLINE, SIGSUBST_NULL, cp->cpu_id);
293 293 return (0); /* nothing special to do on this arch */
294 294 }
295 295
296 296 /*
297 297 * Power on CPU.
298 298 */
299 299 int
300 300 mp_cpu_poweron(struct cpu *cp)
301 301 {
302 302 ASSERT(MUTEX_HELD(&cpu_lock));
303 303 if (&plat_cpu_poweron)
304 304 return (plat_cpu_poweron(cp)); /* platform-dependent hook */
305 305
306 306 return (ENOTSUP);
307 307 }
308 308
309 309 /*
310 310 * Power off CPU.
311 311 */
312 312 int
313 313 mp_cpu_poweroff(struct cpu *cp)
314 314 {
315 315 ASSERT(MUTEX_HELD(&cpu_lock));
316 316 if (&plat_cpu_poweroff)
317 317 return (plat_cpu_poweroff(cp)); /* platform-dependent hook */
318 318
319 319 return (ENOTSUP);
320 320 }
321 321
322 322 void
323 323 mp_cpu_faulted_enter(struct cpu *cp)
324 324 {
325 325 cpu_faulted_enter(cp);
326 326 }
327 327
328 328 void
329 329 mp_cpu_faulted_exit(struct cpu *cp)
330 330 {
331 331 cpu_faulted_exit(cp);
332 332 }
(119 unchanged lines elided)
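For reference, a hypothetical caller supplying a pause callback under the new signature. Everything here is illustrative and not part of this change: my_pause_func is a made-up name, and the assumption (labeled in the comments) is that the callback is invoked during the pause sequence on each paused CPU.

	/*
	 * Hypothetical callback; assumed to be invoked during the pause
	 * sequence for each paused CPU.
	 */
	static void *
	my_pause_func(void *arg)
	{
		/* per-CPU work while CPUs are being paused */
		return (NULL);
	}

	mutex_enter(&cpu_lock);			/* pause_cpus() requires cpu_lock */
	pause_cpus(NULL, my_pause_func);
	/* ... operate while the other CPUs are paused ... */
	start_cpus();				/* resume the paused CPUs */
	mutex_exit(&cpu_lock);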