Print this page
XXXX pass in cpu_pause_func via pause_cpus
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4u/serengeti/io/sbdp_quiesce.c
+++ new/usr/src/uts/sun4u/serengeti/io/sbdp_quiesce.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * A CPR derivative specifically for sbd
29 29 */
30 30
31 31 #include <sys/types.h>
32 32 #include <sys/systm.h>
33 33 #include <sys/machparam.h>
34 34 #include <sys/machsystm.h>
35 35 #include <sys/ddi.h>
36 36 #define SUNDDI_IMPL
37 37 #include <sys/sunddi.h>
38 38 #include <sys/sunndi.h>
39 39 #include <sys/devctl.h>
40 40 #include <sys/time.h>
41 41 #include <sys/kmem.h>
42 42 #include <nfs/lm.h>
43 43 #include <sys/ddi_impldefs.h>
44 44 #include <sys/ndi_impldefs.h>
45 45 #include <sys/obpdefs.h>
46 46 #include <sys/cmn_err.h>
47 47 #include <sys/debug.h>
48 48 #include <sys/errno.h>
49 49 #include <sys/callb.h>
50 50 #include <sys/clock.h>
51 51 #include <sys/x_call.h>
52 52 #include <sys/cpuvar.h>
53 53 #include <sys/epm.h>
54 54 #include <sys/vfs.h>
55 55
56 56 #ifdef DEBUG
57 57 #include <sys/note.h>
58 58 #endif
59 59
60 60 #include <sys/promif.h>
61 61 #include <sys/conf.h>
62 62 #include <sys/cyclic.h>
63 63
64 64 #include <sys/sbd_ioctl.h>
65 65 #include <sys/sbd.h>
66 66 #include <sys/sbdp_priv.h>
67 67 #include <sys/cpu_sgnblk_defs.h>
68 68
/*
 * Return the error-resource string buffer embedded in the sbd error
 * structure; callers format the failing resource's name into it.
 */
static char *
sbdp_get_err_buf(sbd_error_t *ep)
{
	return (ep->e_rsc);
}
74 74
75 75 extern void e_ddi_enter_driver_list(struct devnames *dnp, int *listcnt);
76 76 extern void e_ddi_exit_driver_list(struct devnames *dnp, int listcnt);
77 77 extern int is_pseudo_device(dev_info_t *dip);
78 78
79 79 extern kmutex_t cpu_lock;
80 80
81 81 static int sbdp_is_real_device(dev_info_t *dip);
82 82 #ifdef DEBUG
83 83 static int sbdp_bypass_device(char *dname);
84 84 #endif
85 85 static int sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref);
86 86
87 87 static int sbdp_resolve_devname(dev_info_t *dip, char *buffer,
88 88 char *alias);
89 89
90 90 int sbdp_test_suspend(sbdp_handle_t *hp);
91 91
92 92 #define SR_STATE(srh) ((srh)->sr_suspend_state)
93 93 #define SR_SET_STATE(srh, state) (SR_STATE((srh)) = (state))
94 94 #define SR_FAILED_DIP(srh) ((srh)->sr_failed_dip)
95 95
96 96 #define SR_FLAG_WATCHDOG 0x1
97 97 #define SR_CHECK_FLAG(srh, flag) ((srh)->sr_flags & (flag))
98 98 #define SR_SET_FLAG(srh, flag) ((srh)->sr_flags |= (flag))
99 99 #define SR_CLEAR_FLAG(srh, flag) ((srh)->sr_flags &= ~(flag))
100 100
101 101 #ifdef DEBUG
102 102 /*
103 103 * Just for testing. List of drivers to bypass when performing a suspend.
104 104 */
105 105 static char *sbdp_bypass_list[] = {
106 106 /* "sgsbbc", this is an example when needed */
107 107 ""
108 108 };
109 109 #endif
110 110
111 111 #define SKIP_SYNC /* bypass sync ops in sbdp_suspend */
112 112
113 113 /*
114 114 * sbdp_skip_user_threads is used to control if user threads should
115 115 * be suspended. If sbdp_skip_user_threads is true, the rest of the
116 116 * flags are not used; if it is false, sbdp_check_user_stop_result
117 117 * will be used to control whether or not we need to check suspend
118 118 * result, and sbdp_allow_blocked_threads will be used to control
119 119 * whether or not we allow suspend to continue if there are blocked
120 120 * threads. We allow all combinations of sbdp_check_user_stop_result
121 121 * and sbdp_allow_block_threads, even though it might not make much
122 122 * sense to not allow block threads when we don't even check stop
123 123 * result.
124 124 */
125 125 static int sbdp_skip_user_threads = 0; /* default to FALSE */
126 126 static int sbdp_check_user_stop_result = 1; /* default to TRUE */
127 127 static int sbdp_allow_blocked_threads = 1; /* default to TRUE */
128 128
129 129
/*
 * Quiesce clock activity: disable kernel preemption on this CPU and
 * suspend all cyclics.  Undone, in reverse order, by sbdp_enable_intr().
 */
static void
sbdp_stop_intr(void)
{
	kpreempt_disable();
	cyclic_suspend();
}
136 136
/*
 * Re-enable clock activity suspended by sbdp_stop_intr(): resume
 * cyclics, then allow kernel preemption again.
 */
static void
sbdp_enable_intr(void)
{
	cyclic_resume();
	kpreempt_enable();
}
143 143
144 144 sbdp_sr_handle_t *
145 145 sbdp_get_sr_handle(void)
146 146 {
147 147 sbdp_sr_handle_t *srh;
148 148 srh = kmem_zalloc(sizeof (sbdp_sr_handle_t), KM_SLEEP);
149 149
150 150 return (srh);
151 151 }
152 152
153 153 void
154 154 sbdp_release_sr_handle(sbdp_sr_handle_t *srh)
155 155 {
156 156 ASSERT(SR_FAILED_DIP(srh) == NULL);
157 157 kmem_free((caddr_t)srh, sizeof (sbdp_sr_handle_t));
158 158 }
159 159
160 160 static int
161 161 sbdp_is_real_device(dev_info_t *dip)
162 162 {
163 163 struct regspec *regbuf = NULL;
164 164 int length = 0;
165 165 int rc;
166 166
167 167 if (ddi_get_driver(dip) == NULL)
168 168 return (0);
169 169
170 170 if (DEVI(dip)->devi_pm_flags & (PMC_NEEDS_SR|PMC_PARENTAL_SR))
171 171 return (1);
172 172 if (DEVI(dip)->devi_pm_flags & PMC_NO_SR)
173 173 return (0);
174 174
175 175 /*
176 176 * now the general case
177 177 */
178 178 rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
179 179 (caddr_t)®buf, &length);
180 180 ASSERT(rc != DDI_PROP_NO_MEMORY);
181 181 if (rc != DDI_PROP_SUCCESS) {
182 182 return (0);
183 183 } else {
184 184 if ((length > 0) && (regbuf != NULL))
185 185 kmem_free(regbuf, length);
186 186 return (1);
187 187 }
188 188 }
189 189
#ifdef DEBUG
/*
 * Return non-zero if dname appears on the test-only bypass list,
 * meaning the driver should be skipped during suspend.  The list is
 * terminated by an empty string.
 *
 * Rewritten from a convoluted form that walked the table with both a
 * pointer cursor and a separately incremented index hidden inside the
 * strcmp() argument; a single index is equivalent and clearer.
 */
static int
sbdp_bypass_device(char *dname)
{
	int i;

	/* check the bypass list */
	for (i = 0; sbdp_bypass_list[i][0] != '\0'; i++) {
		SBDP_DBG_QR("Checking %s\n", sbdp_bypass_list[i]);
		if (strcmp(dname, sbdp_bypass_list[i]) == 0)
			return (1);
	}
	return (0);
}
#endif
205 205
206 206 static int
207 207 sbdp_resolve_devname(dev_info_t *dip, char *buffer, char *alias)
208 208 {
209 209 major_t devmajor;
210 210 char *aka, *name;
211 211
212 212 *buffer = *alias = 0;
213 213
214 214 if (dip == NULL)
215 215 return (-1);
216 216
217 217 if ((name = ddi_get_name(dip)) == NULL)
218 218 name = "<null name>";
219 219
220 220 aka = name;
221 221
222 222 if ((devmajor = ddi_name_to_major(aka)) != -1)
223 223 aka = ddi_major_to_name(devmajor);
224 224
225 225 (void) strcpy(buffer, name);
226 226
227 227 if (strcmp(name, aka))
228 228 (void) strcpy(alias, aka);
229 229 else
230 230 *alias = 0;
231 231
232 232 return (0);
233 233 }
234 234
/*
 * Context handed to sbdp_check_dip() during the device-tree reference
 * walk performed by sbdp_check_devices().
 */
typedef struct sbdp_ref {
	int *refcount;			/* count of referenced devices found */
	int *refcount_non_gldv3;	/* optional: referenced non-GLDv3 NIC drivers */
	sbd_error_t *sep;		/* records the first blocking device */
} sbdp_ref_t;
240 240
/*
 * e_ddi_branch_referenced() callback, invoked once per dip in the
 * branch.  `ref' is non-zero when the framework found the device
 * referenced.  Tallies referenced devices into the sbdp_ref_t passed
 * via `arg' and records the first blocking device's pathname and an
 * ESBD_BUSY error; returns DDI_WALK_TERMINATE to stop the walk at
 * that point.
 */
static int
sbdp_check_dip(dev_info_t *dip, void *arg, uint_t ref)
{
	char *dname;
	sbdp_ref_t *sbrp = (sbdp_ref_t *)arg;

	if (dip == NULL)
		return (DDI_WALK_CONTINUE);

	ASSERT(sbrp->sep != NULL);
	ASSERT(sbrp->refcount != NULL);

	/* only hardware-backed nodes can block the operation */
	if (!sbdp_is_real_device(dip))
		return (DDI_WALK_CONTINUE);

	dname = ddi_binding_name(dip);

	/*
	 * These two PCI bridge classes are unconditionally treated as
	 * busy, whether referenced or not.
	 */
	if ((strcmp(dname, "pciclass,060940") == 0) || (strcmp(dname,
	    "pciclass,060980") == 0)) {
		(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
		sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
		(*sbrp->refcount)++;
		return (DDI_WALK_TERMINATE);
	}

#ifdef DEBUG
	/* test hook: pretend bypassed drivers are unreferenced */
	if (sbdp_bypass_device(dname))
		return (DDI_WALK_CONTINUE);
#endif

	if (ref) {
		major_t major;

		(*sbrp->refcount)++;
		SBDP_DBG_QR("\n%s (major# %d) is referenced\n",
		    dname, ddi_name_to_major(dname));
		(void) ddi_pathname(dip, sbdp_get_err_buf(sbrp->sep));
		major = ddi_driver_major(dip);
		/*
		 * Referenced non-GLDv3 physical network drivers are
		 * counted separately (when the caller asked for it)
		 * and do not terminate the walk; the caller decides
		 * how to treat them.
		 */
		if (sbrp->refcount_non_gldv3 && NETWORK_PHYSDRV(major) &&
		    !GLDV3_DRV(major)) {
			(*sbrp->refcount_non_gldv3)++;
			return (DDI_WALK_CONTINUE);
		}
		sbdp_set_err(sbrp->sep, ESBD_BUSY, NULL);
		return (DDI_WALK_TERMINATE);
	}
	return (DDI_WALK_CONTINUE);
}
289 289
290 290 void
291 291 sbdp_check_devices(dev_info_t *dip, int *refcount, sbd_error_t *sep,
292 292 int *refcount_non_gldv3)
293 293 {
294 294 sbdp_ref_t sbr;
295 295
296 296 sbr.refcount = refcount;
297 297 sbr.refcount_non_gldv3 = refcount_non_gldv3;
298 298 sbr.sep = sep;
299 299
300 300 ASSERT(e_ddi_branch_held(dip));
301 301
302 302 (void) e_ddi_branch_referenced(dip, sbdp_check_dip, &sbr);
303 303 }
304 304
/*
 * Starting from the root node suspend all devices in the device tree.
 * Assumes that all devices have already been marked busy.
 *
 * Siblings are walked iteratively, children recursively, so each
 * subtree is suspended bottom-up.  Returns DDI_SUCCESS (0) when the
 * whole subtree suspended; note the failure value is ENXIO for a
 * failed child subtree but DDI_FAILURE for a local detach failure --
 * callers only compare against DDI_SUCCESS, so both work.
 */
static int
sbdp_suspend_devices_(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	major_t major;
	char *dname;

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char d_name[40], d_alias[40], *d_info;

		/* children first: suspend bottom-up */
		if (sbdp_suspend_devices_(ddi_get_child(dip), srh)) {
			return (ENXIO);
		}

		if (!sbdp_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = DEVI(dip)->devi_binding_name) != NULL)
			major = ddi_name_to_major(dname);

#ifdef DEBUG
		if (sbdp_bypass_device(dname)) {
			SBDP_DBG_QR("bypassed suspend of %s (major# %d)\n",
			    dname, major);
			continue;
		}
#endif

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (sbdp_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				SBDP_DBG_QR("\tsuspending %s@%s (aka %s)\n",
				    d_name, d_info, d_alias);
			} else {
				SBDP_DBG_QR("\tsuspending %s@%s\n",
				    d_name, d_info);
			}
		} else {
			SBDP_DBG_QR("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			/* report the failing driver's major number */
			(void) sprintf(sbdp_get_err_buf(&srh->sep),
			    "%d", major);

			sbdp_set_err(&srh->sep, ESGT_SUSPEND, NULL);
			/* hold dip so sbdp_resume_devices() can find it */
			ndi_hold_devi(dip);
			SR_FAILED_DIP(srh) = dip;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
366 366
367 367 /*ARGSUSED*/
368 368 static int
369 369 sbdp_suspend_devices_enter(dev_info_t *dip, void *arg)
370 370 {
371 371 struct dev_info *devi = DEVI(dip);
372 372 ndi_devi_enter(dip, &devi->devi_circular);
373 373 return (DDI_WALK_CONTINUE);
374 374 }
375 375
376 376 /*ARGSUSED*/
377 377 static int
378 378 sbdp_suspend_devices_exit(dev_info_t *dip, void *arg)
379 379 {
380 380 struct dev_info *devi = DEVI(dip);
381 381 ndi_devi_exit(dip, devi->devi_circular);
382 382 return (DDI_WALK_CONTINUE);
383 383 }
384 384
/*
 * Before suspending devices first mark all device nodes busy. This
 * avoids a deadlock situation when another thread holds a device busy
 * and accesses an already suspended device.
 *
 * Returns the result of the suspend pass (DDI_SUCCESS or a non-zero
 * failure code from sbdp_suspend_devices_()); all nodes are released
 * again on either outcome.
 */
static int
sbdp_suspend_devices(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	int rv;

	/* assumes dip is ddi_root_node so no ndi_devi_enter required */
	ASSERT(dip == ddi_root_node());
	ddi_walk_devs(dip, sbdp_suspend_devices_enter, NULL);
	rv = sbdp_suspend_devices_(dip, srh);
	ddi_walk_devs(dip, sbdp_suspend_devices_exit, NULL);
	return (rv);
}
402 402
/*
 * Resume (DDI_RESUME) every device under `start', visiting siblings
 * in reverse order so devices come back in the opposite order of
 * their suspension.  SR_FAILED_DIP(srh), if set, marks the device
 * whose suspend failed: it and all devices after it were never
 * suspended, so resumption is skipped until the hold taken in
 * sbdp_suspend_devices_() is dropped here.
 */
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t *dip, *next, *last = NULL;
	char *bn;
	sbd_error_t *sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		/* locate the last sibling not yet processed */
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
		    SR_FAILED_DIP(srh) == NULL) {

			/*
			 * NOTE(review): `bn' is assigned only when the
			 * binding name is non-NULL; a node without one
			 * would leave it uninitialized in the uses
			 * below -- confirm real devices always have a
			 * binding name.
			 */
			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
				    d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s (aka %s)\n",
						    d_name, d_info,
						    d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
						    "%s@%s\n",
						    d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
					    bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
					    "%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		/* recurse into this node's children, then retire it */
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}
485 485
486 486 /*
487 487 * True if thread is virtually stopped. Similar to CPR_VSTOPPED
488 488 * but from DR point of view. These user threads are waiting in
489 489 * the kernel. Once they return from kernel, they will process
490 490 * the stop signal and stop.
491 491 */
492 492 #define SBDP_VSTOPPED(t) \
493 493 ((t)->t_state == TS_SLEEP && \
494 494 (t)->t_wchan != NULL && \
495 495 (t)->t_astflag && \
496 496 ((t)->t_proc_flag & TP_CHKPT))
497 497
498 498
/*
 * Stop all user threads prior to quiesce.  Makes up to
 * SBDP_UTSTOP_RETRY passes: each pass flags every user thread with
 * TP_CHKPT, nudges it toward the stop point, waits, and then re-walks
 * the thread list checking that each thread is either CPR-stopped or
 * (when sbdp_allow_blocked_threads is set) virtually stopped in the
 * kernel (SBDP_VSTOPPED).  Returns DDI_SUCCESS when all threads
 * stopped, ESRCH (with the offending process cached in the error
 * buffer) otherwise.
 */
static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int count;
	char cache_psargs[PSARGSZ];
	kthread_id_t cache_tp;
	uint_t cache_t_state;
	int bailout;
	sbd_error_t *sep;
	kthread_id_t tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				/*
				 * drop the locks across add_one_utstop()
				 * and retake them before touching tp
				 */
				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
		    tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {

				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
				    sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
		    cache_psargs, (void *)cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
620 620
/*
 * Undo sbdp_stop_user_threads(): clear TP_CHKPT on every user thread
 * and put CPR-stopped threads back on their run queues.
 */
static void
sbdp_start_user_threads(void)
{
	kthread_id_t tp;

	mutex_enter(&pidlock);

	/* walk all threads and release them */
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		/* skip kernel threads */
		if (ttoproc(tp)->p_as == &kas)
			continue;

		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {
			/* back on the runq */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
	}

	mutex_exit(&pidlock);
}
651 651
/*
 * Post signal `sig' to every user process except init, zombies, and
 * the current process, then delay briefly to let them react.  Used to
 * broadcast SIGTHAW after resume.
 */
static void
sbdp_signal_user(int sig)
{
	struct proc *p;

	mutex_enter(&pidlock);

	for (p = practive; p != NULL; p = p->p_next) {
		/* only user threads */
		if (p->p_exec == NULL || p->p_stat == SZOMB ||
		    p == proc_init || p == ttoproc(curthread))
			continue;

		mutex_enter(&p->p_lock);
		sigtoproc(p, NULL, sig);
		mutex_exit(&p->p_lock);
	}

	mutex_exit(&pidlock);

	/* add a bit of delay */
	delay(hz);
}
675 675
676 676 static uint_t saved_watchdog_seconds;
677 677
/*
 * Resume the system after (a possibly partial) sbdp_suspend().  The
 * switch falls through from the deepest suspend state reached
 * (SR_STATE(srh)) back up to SBDP_SRSTATE_BEGIN, undoing each stage:
 * restart CPUs/interrupts, re-attach drivers, restart the lock
 * manager, release user threads, and finally broadcast SIGTHAW.
 */
void
sbdp_resume(sbdp_sr_handle_t *srh)
{
	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RESUME_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	switch (SR_STATE(srh)) {
	case SBDP_SRSTATE_FULL:

		/* sbdp_suspend() exited holding cpu_lock */
		ASSERT(MUTEX_HELD(&cpu_lock));

		/*
		 * Prevent false alarm in tod_validate() due to tod
		 * value change between suspend and resume
		 */
		mutex_enter(&tod_lock);
		tod_status_set(TOD_DR_RESUME_DONE);
		mutex_exit(&tod_lock);

		sbdp_enable_intr(); 	/* enable intr & clock */

		/*
		 * release all the other cpus
		 * using start_cpus() vice sbdp_release_cpus()
		 */
		start_cpus();
		mutex_exit(&cpu_lock);

		/*
		 * If we suspended hw watchdog at suspend,
		 * re-enable it now.
		 */
		if (SR_CHECK_FLAG(srh, SR_FLAG_WATCHDOG)) {
			mutex_enter(&tod_lock);
			tod_ops.tod_set_watchdog_timer(
			    saved_watchdog_seconds);
			mutex_exit(&tod_lock);
		}

		/* FALLTHROUGH */

	case SBDP_SRSTATE_DRIVER:
		/*
		 * resume devices: root node doesn't have to
		 * be held in any way.
		 */
		sbdp_resume_devices(ddi_root_node(), srh);

		/*
		 * resume the lock manager
		 */
		lm_cprresume();

		/* FALLTHROUGH */

	case SBDP_SRSTATE_USER:
		/*
		 * finally, resume user threads
		 */
		if (!sbdp_skip_user_threads) {
			SBDP_DBG_QR("DR: resuming user threads...\n");
			sbdp_start_user_threads();
		}
		/* FALLTHROUGH */

	case SBDP_SRSTATE_BEGIN:
	default:
		/*
		 * let those who care know that we've just resumed
		 */
		SBDP_DBG_QR("sending SIGTHAW...\n");
		sbdp_signal_user(SIGTHAW);
		break;
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_RUN, SIGSUBST_NULL, CPU->cpu_id);

	SBDP_DBG_QR("DR: resume COMPLETED\n");
}
763 763
/*
 * Quiesce the system for a board operation.  Stages, in order: stop
 * user threads, suspend the lock manager, suspend all drivers, then
 * pause the other CPUs and stop cyclics/interrupts.  SR_STATE(srh)
 * records how far we got so that sbdp_resume() can unwind from
 * exactly that point; on any stage failure, sbdp_resume() is called
 * here and the error returned.  On DDI_SUCCESS the caller is left
 * holding cpu_lock with all other CPUs paused.
 */
int
sbdp_suspend(sbdp_sr_handle_t *srh)
{
	int force;
	int rc = DDI_SUCCESS;

	force = (srh && (srh->sr_flags & SBDP_IOCTL_FLAG_FORCE));

	/*
	 * if no force flag, check for unsafe drivers
	 */
	if (force) {
		SBDP_DBG_QR("\nsbdp_suspend invoked with force flag");
	}

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCE_INPROGRESS, SIGSUBST_NULL,
	    CPU->cpu_id);

	/*
	 * first, stop all user threads
	 */
	SBDP_DBG_QR("SBDP: suspending user threads...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_USER);
	if (((rc = sbdp_stop_user_threads(srh)) != DDI_SUCCESS) &&
	    sbdp_check_user_stop_result) {
		sbdp_resume(srh);
		return (rc);
	}

#ifndef SKIP_SYNC
	/*
	 * This sync swap out all user pages
	 */
	vfs_sync(SYNC_ALL);
#endif

	/*
	 * special treatment for lock manager
	 */
	lm_cprsuspend();

#ifndef SKIP_SYNC
	/*
	 * sync the file system in case we never make it back
	 */
	sync();

#endif
	/*
	 * now suspend drivers
	 */
	SBDP_DBG_QR("SBDP: suspending drivers...\n");
	SR_SET_STATE(srh, SBDP_SRSTATE_DRIVER);

	/*
	 * Root node doesn't have to be held in any way.
	 */
	if ((rc = sbdp_suspend_devices(ddi_root_node(), srh)) != DDI_SUCCESS) {
		sbdp_resume(srh);
		return (rc);
	}

	/*
	 * finally, grab all cpus
	 */
	SR_SET_STATE(srh, SBDP_SRSTATE_FULL);

	/*
	 * if watchdog was activated, disable it
	 */
	if (watchdog_activated) {
		mutex_enter(&tod_lock);
		saved_watchdog_seconds = tod_ops.tod_clear_watchdog_timer();
		mutex_exit(&tod_lock);
		SR_SET_FLAG(srh, SR_FLAG_WATCHDOG);
	} else {
		SR_CLEAR_FLAG(srh, SR_FLAG_WATCHDOG);
	}

	/* cpu_lock stays held for sbdp_resume(); no pause callbacks */
	mutex_enter(&cpu_lock);
	pause_cpus(NULL, NULL);
	sbdp_stop_intr();

	/*
	 * update the signature block
	 */
	CPU_SIGNATURE(OS_SIG, SIGST_QUIESCED, SIGSUBST_NULL, CPU->cpu_id);

	return (rc);
}
857 857
858 858 /*ARGSUSED*/
859 859 int
860 860 sbdp_test_suspend(sbdp_handle_t *hp)
861 861 {
862 862 sbdp_sr_handle_t *srh;
863 863 int err;
864 864
865 865 SBDP_DBG_QR("%s...\n", "sbdp_test_suspend");
866 866
867 867 srh = sbdp_get_sr_handle();
868 868
869 869 srh->sr_flags = hp->h_flags;
870 870
871 871 if ((err = sbdp_suspend(srh)) == DDI_SUCCESS) {
872 872 sbdp_resume(srh);
873 873 } else {
874 874 SBDP_DBG_MISC("sbdp_suspend() failed, err = 0x%x\n", err);
875 875 }
876 876 sbdp_release_sr_handle(srh);
877 877
878 878 return (0);
879 879 }
880 880
#ifdef DEBUG
/*
 * Passthru ioctl hook (DEBUG only): trigger a test quiesce cycle via
 * sbdp_test_suspend().  `arg' is unused.
 */
int
sbdp_passthru_test_quiesce(sbdp_handle_t *hp, void *arg)
{
	_NOTE(ARGUNUSED(arg))

	return (sbdp_test_suspend(hp));
}
#endif
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX