patch as-lock-macro-simplification
--- old/usr/src/uts/common/os/dumpsubr.c
+++ new/usr/src/uts/common/os/dumpsubr.c
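The functional change visible in this portion of the diff is the address-space lock macro simplification in dump_as() further down: AS_LOCK_ENTER and AS_LOCK_EXIT no longer take an explicit pointer to the as->a_lock field, only the address space itself (plus the reader/writer mode on entry):

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);	/* old */
	AS_LOCK_ENTER(as, RW_READER);			/* new */

	AS_LOCK_EXIT(as, &as->a_lock);			/* old */
	AS_LOCK_EXIT(as);				/* new */

Everything else shown below is unchanged context.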
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/param.h>
28 28 #include <sys/systm.h>
29 29 #include <sys/vm.h>
30 30 #include <sys/proc.h>
31 31 #include <sys/file.h>
32 32 #include <sys/conf.h>
33 33 #include <sys/kmem.h>
34 34 #include <sys/mem.h>
35 35 #include <sys/mman.h>
36 36 #include <sys/vnode.h>
37 37 #include <sys/errno.h>
38 38 #include <sys/memlist.h>
39 39 #include <sys/dumphdr.h>
40 40 #include <sys/dumpadm.h>
41 41 #include <sys/ksyms.h>
42 42 #include <sys/compress.h>
43 43 #include <sys/stream.h>
44 44 #include <sys/strsun.h>
45 45 #include <sys/cmn_err.h>
46 46 #include <sys/bitmap.h>
47 47 #include <sys/modctl.h>
48 48 #include <sys/utsname.h>
49 49 #include <sys/systeminfo.h>
50 50 #include <sys/vmem.h>
51 51 #include <sys/log.h>
52 52 #include <sys/var.h>
53 53 #include <sys/debug.h>
54 54 #include <sys/sunddi.h>
55 55 #include <fs/fs_subr.h>
56 56 #include <sys/fs/snode.h>
57 57 #include <sys/ontrap.h>
58 58 #include <sys/panic.h>
59 59 #include <sys/dkio.h>
60 60 #include <sys/vtoc.h>
61 61 #include <sys/errorq.h>
62 62 #include <sys/fm/util.h>
63 63 #include <sys/fs/zfs.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/seg.h>
70 70 #include <vm/seg_kmem.h>
71 71 #include <sys/clock_impl.h>
72 72 #include <sys/hold_page.h>
73 73
74 74 #include <bzip2/bzlib.h>
75 75
76 76 /*
77 77 * Crash dump time is dominated by disk write time. To reduce this,
78 78 * the stronger compression method bzip2 is applied to reduce the dump
79 79 * size and hence reduce I/O time. However, bzip2 is much more
80 80 * computationally expensive than the existing lzjb algorithm, so to
81 81 * avoid increasing compression time, CPUs that are otherwise idle
82 82 * during panic are employed to parallelize the compression task.
83 83 * Many helper CPUs are needed to prevent bzip2 from being a
84 84 * bottleneck, and on systems with too few CPUs, the lzjb algorithm is
85 85 * parallelized instead. Lastly, I/O and compression are performed by
86 86 * different CPUs, and are hence overlapped in time, unlike the older
87 87 * serial code.
88 88 *
89 89 * Another important consideration is the speed of the dump
90 90 * device. Faster disks need fewer CPUs in order to benefit from
91 91 * parallel lzjb versus parallel bzip2. Therefore, the CPU count
92 92 * threshold for switching from parallel lzjb to parallel bzip2 is
93 93 * elevated for faster disks. The dump device speed is deduced from
94 94 * the setting for dumpbuf.iosize, see dump_update_clevel.
95 95 */
96 96
97 97 /*
98 98 * exported vars
99 99 */
100 100 kmutex_t dump_lock; /* lock for dump configuration */
101 101 dumphdr_t *dumphdr; /* dump header */
102 102 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
103 103 vnode_t *dumpvp; /* dump device vnode pointer */
104 104 u_offset_t dumpvp_size; /* size of dump device, in bytes */
105 105 char *dumppath; /* pathname of dump device */
106 106 int dump_timeout = 120; /* timeout for dumping pages */
107 107 int dump_timeleft; /* portion of dump_timeout remaining */
108 108 int dump_ioerr; /* dump i/o error */
109 109 int dump_check_used; /* enable check for used pages */
110 110 char *dump_stack_scratch; /* scratch area for saving stack summary */
111 111
112 112 /*
113 113 * Tunables for dump compression and parallelism. These can be set via
114 114 * /etc/system.
115 115 *
116 116 * dump_ncpu_low number of helpers for parallel lzjb
117 117 * This is also the minimum configuration.
118 118 *
119 119 * dump_bzip2_level bzip2 compression level: 1-9
120 120 * Higher numbers give greater compression, but take more memory
121 121 * and time. Memory used per helper is ~(dump_bzip2_level * 1MB).
122 122 *
123 123 * dump_plat_mincpu the cross-over limit for using bzip2 (per platform):
124 124 * if dump_plat_mincpu == 0, then always do single threaded dump
125 125 * if ncpu >= dump_plat_mincpu then try to use bzip2
126 126 *
127 127 * dump_metrics_on if set, metrics are collected in the kernel, passed
128 128 * to savecore via the dump file, and recorded by savecore in
129 129 * METRICS.txt.
130 130 */
131 131 uint_t dump_ncpu_low = 4; /* minimum config for parallel lzjb */
132 132 uint_t dump_bzip2_level = 1; /* bzip2 level (1-9) */
133 133
134 134 /* Use dump_plat_mincpu_default unless this variable is set by /etc/system */
135 135 #define MINCPU_NOT_SET ((uint_t)-1)
136 136 uint_t dump_plat_mincpu = MINCPU_NOT_SET;
137 137
138 138 /* tunables for pre-reserved heap */
139 139 uint_t dump_kmem_permap = 1024;
140 140 uint_t dump_kmem_pages = 8;
141 141
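As a concrete illustration of the tunables above (assuming the usual /etc/system "set variable = value" syntax): "set dump_plat_mincpu = 0" forces a single-threaded dump regardless of CPU count, while "set dump_bzip2_level = 9" maximizes bzip2 compression at a cost of roughly 9 MB of reserved memory per helper, per the sizing note above.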
142 142 /* Define multiple buffers per helper to avoid stalling */
143 143 #define NCBUF_PER_HELPER 2
144 144 #define NCMAP_PER_HELPER 4
145 145
146 146 /* minimum number of helpers configured */
147 147 #define MINHELPERS (dump_ncpu_low)
148 148 #define MINCBUFS (MINHELPERS * NCBUF_PER_HELPER)
149 149
150 150 /*
151 151 * Define constant parameters.
152 152 *
153 153 * CBUF_SIZE size of an output buffer
154 154 *
155 155 * CBUF_MAPSIZE size of virtual range for mapping pages
156 156 *
157 157 * CBUF_MAPNP size of virtual range in pages
158 158 *
159 159 */
160 160 #define DUMP_1KB ((size_t)1 << 10)
161 161 #define DUMP_1MB ((size_t)1 << 20)
162 162 #define CBUF_SIZE ((size_t)1 << 17)
163 163 #define CBUF_MAPSHIFT (22)
164 164 #define CBUF_MAPSIZE ((size_t)1 << CBUF_MAPSHIFT)
165 165 #define CBUF_MAPNP ((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT))
166 166
167 167 /*
168 168 * Compression metrics are accumulated nano-second subtotals. The
169 169 * results are normalized by the number of pages dumped. A report is
170 170 * generated when dumpsys() completes and is saved in the dump image
171 171 * after the trailing dump header.
172 172 *
173 173 * Metrics are always collected. Set the variable dump_metrics_on to
174 174 * cause metrics to be saved in the crash file, where savecore will
175 175 * save them in the file METRICS.txt.
176 176 */
177 177 #define PERPAGES \
178 178 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
179 179 PERPAGE(copy) PERPAGE(compress) \
180 180 PERPAGE(write) \
181 181 PERPAGE(inwait) PERPAGE(outwait)
182 182
183 183 typedef struct perpage {
184 184 #define PERPAGE(x) hrtime_t x;
185 185 PERPAGES
186 186 #undef PERPAGE
187 187 } perpage_t;
188 188
189 189 /*
190 190 * This macro controls the code generation for collecting dump
191 191 * performance information. By default, the code is generated, but
192 192 * automatic saving of the information is disabled. If dump_metrics_on
193 193 * is set to 1, the timing information is passed to savecore via the
194 194 * crash file, where it is appended to the file dump-dir/METRICS.txt.
195 195 */
196 196 #define COLLECT_METRICS
197 197
198 198 #ifdef COLLECT_METRICS
199 199 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
200 200
201 201 #define HRSTART(v, m) v##ts.m = gethrtime()
202 202 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
203 203 #define HRBEGIN(v, m, s) v##ts.m = gethrtime(); v.size += s
204 204 #define HREND(v, m) v.m += gethrtime() - v##ts.m
205 205 #define HRNORM(v, m, n) v.m /= (n)
206 206
207 207 #else
208 208 #define HRSTART(v, m)
209 209 #define HRSTOP(v, m)
210 210 #define HRBEGIN(v, m, s)
211 211 #define HREND(v, m)
212 212 #define HRNORM(v, m, n)
213 213 #endif /* COLLECT_METRICS */
214 214
215 215 /*
216 216 * Buffers for copying and compressing memory pages.
217 217 *
218 218 * cbuf_t buffer controllers: used for both input and output.
219 219 *
220 220 * The buffer state indicates how it is being used:
221 221 *
222 222 * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for
223 223 * mapping input pages.
224 224 *
225 225 * CBUF_INREADY: input pages are mapped and ready for compression by a
226 226 * helper.
227 227 *
228 228 * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap.
229 229 *
230 230 * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available.
231 231 *
232 232 * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper,
233 233 * ready to write out.
234 234 *
235 235 * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper
236 236 * (reports UE errors.)
237 237 */
238 238
239 239 typedef enum cbufstate {
240 240 CBUF_FREEMAP,
241 241 CBUF_INREADY,
242 242 CBUF_USEDMAP,
243 243 CBUF_FREEBUF,
244 244 CBUF_WRITE,
245 245 CBUF_ERRMSG
246 246 } cbufstate_t;
247 247
248 248 typedef struct cbuf cbuf_t;
249 249
250 250 struct cbuf {
251 251 cbuf_t *next; /* next in list */
252 252 cbufstate_t state; /* processing state */
253 253 size_t used; /* amount used */
254 254 size_t size; /* mem size */
255 255 char *buf; /* kmem or vmem */
256 256 pgcnt_t pagenum; /* index to pfn map */
257 257 pgcnt_t bitnum; /* first set bitnum */
258 258 pfn_t pfn; /* first pfn in mapped range */
259 259 int off; /* byte offset to first pfn */
260 260 };
261 261
262 262 static char dump_osimage_uuid[36 + 1];
263 263
264 264 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
265 265 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
266 266 ((ch) >= 'A' && (ch) <= 'F'))
267 267
268 268 /*
269 269 * cqueue_t queues: a uni-directional channel for communication
270 270 * from the master to helper tasks or vice-versa using put and
271 271 * get primitives. Both mappings and data buffers are passed via
272 272 * queues. Producers close a queue when done. The number of
273 273 * active producers is reference counted so the consumer can
274 274 * detect end of data. Concurrent access is mediated by atomic
275 275 * operations for panic dump, or mutex/cv for live dump.
276 276 *
277 277 * There are four queues, used as follows:
278 278 *
279 279 * Queue Dataflow NewState
280 280 * --------------------------------------------------
281 281 * mainq master -> master FREEMAP
282 282 * master has initialized or unmapped an input buffer
283 283 * --------------------------------------------------
284 284 * helperq master -> helper INREADY
285 285 * master has mapped input for use by helper
286 286 * --------------------------------------------------
287 287 * mainq master <- helper USEDMAP
288 288 * helper is done with input
289 289 * --------------------------------------------------
290 290 * freebufq master -> helper FREEBUF
291 291 * master has initialized or written an output buffer
292 292 * --------------------------------------------------
293 293 * mainq master <- helper WRITE
294 294 * block of compressed pages from a helper
295 295 * --------------------------------------------------
296 296 * mainq master <- helper ERRMSG
297 297 * error messages from a helper (memory error case)
298 298 * --------------------------------------------------
299 299 * writerq master <- master WRITE
300 300 * non-blocking queue of blocks to write
301 301 * --------------------------------------------------
302 302 */
303 303 typedef struct cqueue {
304 304 cbuf_t *volatile first; /* first in list */
305 305 cbuf_t *last; /* last in list */
306 306 hrtime_t ts; /* timestamp */
307 307 hrtime_t empty; /* total time empty */
308 308 kmutex_t mutex; /* live state lock */
309 309 kcondvar_t cv; /* live wait var */
310 310 lock_t spinlock; /* panic mode spin lock */
311 311 volatile uint_t open; /* producer ref count */
312 312 } cqueue_t;
313 313
314 314 /*
315 315 * Convenience macros for using the cqueue functions
316 316 * Note that the caller must have defined "dumpsync_t *ds"
317 317 */
318 318 #define CQ_IS_EMPTY(q) \
319 319 (ds->q.first == NULL)
320 320
321 321 #define CQ_OPEN(q) \
322 322 atomic_inc_uint(&ds->q.open)
323 323
324 324 #define CQ_CLOSE(q) \
325 325 dumpsys_close_cq(&ds->q, ds->live)
326 326
327 327 #define CQ_PUT(q, cp, st) \
328 328 dumpsys_put_cq(&ds->q, cp, st, ds->live)
329 329
330 330 #define CQ_GET(q) \
331 331 dumpsys_get_cq(&ds->q, ds->live)
332 332
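As a rough sketch (not code from this file) of how these macros combine with the queue table above, a helper's main loop looks roughly like the following, assuming the required local "dumpsync_t *ds" pointing at the shared state:

	cbuf_t *cp;

	while ((cp = CQ_GET(helperq)) != NULL) {
		/* compress the pages mapped at cp->buf into output buffers */
		CQ_PUT(mainq, cp, CBUF_USEDMAP);	/* give the map back for unmapping */
	}
	CQ_CLOSE(mainq);	/* this producer into mainq is finished */

The real helper also pulls free output buffers from freebufq and posts them to mainq in the CBUF_WRITE (or CBUF_ERRMSG) state, but the get/put/close pattern is the same.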
333 333 /*
334 334 * Dynamic state when dumpsys() is running.
335 335 */
336 336 typedef struct dumpsync {
337 337 pgcnt_t npages; /* subtotal of pages dumped */
338 338 pgcnt_t pages_mapped; /* subtotal of pages mapped */
339 339 pgcnt_t pages_used; /* subtotal of pages used per map */
340 340 size_t nwrite; /* subtotal of bytes written */
341 341 uint_t live; /* running live dump */
342 342 uint_t neednl; /* will need to print a newline */
343 343 uint_t percent; /* dump progress */
344 344 uint_t percent_done; /* dump progress reported */
345 345 cqueue_t freebufq; /* free kmem bufs for writing */
346 346 cqueue_t mainq; /* input for main task */
347 347 cqueue_t helperq; /* input for helpers */
348 348 cqueue_t writerq; /* input for writer */
349 349 hrtime_t start; /* start time */
350 350 hrtime_t elapsed; /* elapsed time when completed */
351 351 hrtime_t iotime; /* time spent writing nwrite bytes */
352 352 hrtime_t iowait; /* time spent waiting for output */
353 353 hrtime_t iowaitts; /* iowait timestamp */
354 354 perpage_t perpage; /* metrics */
355 355 perpage_t perpagets;
356 356 int dumpcpu; /* master cpu */
357 357 } dumpsync_t;
358 358
359 359 static dumpsync_t dumpsync; /* synchronization vars */
360 360
361 361 /*
362 362 * helper_t helpers: contains the context for a stream. CPUs run in
363 363 * parallel at dump time; each CPU creates a single stream of
364 364 * compression data. Stream data is divided into CBUF_SIZE blocks.
365 365 * The blocks are written in order within a stream. But, blocks from
366 366 * multiple streams can be interleaved. Each stream is identified by a
367 367 * unique tag.
368 368 */
369 369 typedef struct helper {
370 370 int helper; /* bound helper id */
371 371 int tag; /* compression stream tag */
372 372 perpage_t perpage; /* per page metrics */
373 373 perpage_t perpagets; /* per page metrics (timestamps) */
374 374 taskqid_t taskqid; /* live dump task ptr */
375 375 int in, out; /* buffer offsets */
376 376 cbuf_t *cpin, *cpout, *cperr; /* cbuf objects in process */
377 377 dumpsync_t *ds; /* pointer to sync vars */
378 378 size_t used; /* counts input consumed */
379 379 char *page; /* buffer for page copy */
380 380 char *lzbuf; /* lzjb output */
381 381 bz_stream bzstream; /* bzip2 state */
382 382 } helper_t;
383 383
384 384 #define MAINHELPER (-1) /* helper is also the main task */
385 385 #define FREEHELPER (-2) /* unbound helper */
386 386 #define DONEHELPER (-3) /* helper finished */
387 387
388 388 /*
389 389 * configuration vars for dumpsys
390 390 */
391 391 typedef struct dumpcfg {
392 392 int threshold; /* ncpu threshold for bzip2 */
393 393 int nhelper; /* number of helpers */
394 394 int nhelper_used; /* actual number of helpers used */
395 395 int ncmap; /* number VA pages for compression */
396 396 int ncbuf; /* number of bufs for compression */
397 397 int ncbuf_used; /* number of bufs in use */
398 398 uint_t clevel; /* dump compression level */
399 399 helper_t *helper; /* array of helpers */
400 400 cbuf_t *cmap; /* array of input (map) buffers */
401 401 cbuf_t *cbuf; /* array of output buffers */
402 402 ulong_t *helpermap; /* set of dumpsys helper CPU ids */
403 403 ulong_t *bitmap; /* bitmap for marking pages to dump */
404 404 ulong_t *rbitmap; /* bitmap for used CBUF_MAPSIZE ranges */
405 405 pgcnt_t bitmapsize; /* size of bitmap */
406 406 pgcnt_t rbitmapsize; /* size of bitmap for ranges */
407 407 pgcnt_t found4m; /* number ranges allocated by dump */
408 408 pgcnt_t foundsm; /* number small pages allocated by dump */
409 409 pid_t *pids; /* list of process IDs at dump time */
410 410 size_t maxsize; /* memory size needed at dump time */
411 411 size_t maxvmsize; /* size of reserved VM */
412 412 char *maxvm; /* reserved VM for spare pages */
413 413 lock_t helper_lock; /* protect helper state */
414 414 char helpers_wanted; /* flag to enable parallelism */
415 415 } dumpcfg_t;
416 416
417 417 static dumpcfg_t dumpcfg; /* config vars */
418 418
419 419 /*
420 420 * The dump I/O buffer.
421 421 *
422 422 * There is one I/O buffer used by dumpvp_write and dumpvp_flush. It is
423 423 * sized according to the optimum device transfer speed.
424 424 */
425 425 typedef struct dumpbuf {
426 426 vnode_t *cdev_vp; /* VCHR open of the dump device */
427 427 len_t vp_limit; /* maximum write offset */
428 428 offset_t vp_off; /* current dump device offset */
429 429 char *cur; /* dump write pointer */
430 430 char *start; /* dump buffer address */
431 431 char *end; /* dump buffer end */
432 432 size_t size; /* size of dumpbuf in bytes */
433 433 size_t iosize; /* best transfer size for device */
434 434 } dumpbuf_t;
435 435
436 436 dumpbuf_t dumpbuf; /* I/O buffer */
437 437
438 438 /*
439 439 * The dump I/O buffer must be at least one page, at most xfer_size
440 440 * bytes, and should scale with physmem in between. The transfer size
441 441 * passed in will either represent a global default (maxphys) or the
442 442 * best size for the device. The size of the dumpbuf I/O buffer is
443 443 * limited by dumpbuf_limit (8MB by default) because the dump
444 444 * performance saturates beyond a certain size. The default is to
445 445 * select 1/4096 of the memory.
446 446 */
447 447 static int dumpbuf_fraction = 12; /* memory size scale factor */
448 448 static size_t dumpbuf_limit = 8 * DUMP_1MB; /* max I/O buf size */
449 449
450 450 static size_t
451 451 dumpbuf_iosize(size_t xfer_size)
452 452 {
453 453 size_t iosize = ptob(physmem >> dumpbuf_fraction);
454 454
455 455 if (iosize < PAGESIZE)
456 456 iosize = PAGESIZE;
457 457 else if (iosize > xfer_size)
458 458 iosize = xfer_size;
459 459 if (iosize > dumpbuf_limit)
460 460 iosize = dumpbuf_limit;
461 461 return (iosize & PAGEMASK);
462 462 }
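To make the sizing policy concrete, here is a small stand-alone sketch of the same clamping arithmetic (assuming 4 KB pages and the default dumpbuf_fraction of 12 and dumpbuf_limit of 8 MB); it only restates the bounds above and is not the kernel code itself:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGESIZE	4096UL			/* assumed page size */
	#define PAGEMASK	(~(PAGESIZE - 1))
	#define DUMP_1MB	((size_t)1 << 20)

	static size_t
	iosize_for(size_t physmem_pages, size_t xfer_size)
	{
		size_t limit = 8 * DUMP_1MB;		/* dumpbuf_limit default */
		size_t iosize = (physmem_pages >> 12) * PAGESIZE; /* 1/4096 of memory */

		if (iosize < PAGESIZE)
			iosize = PAGESIZE;
		else if (iosize > xfer_size)
			iosize = xfer_size;
		if (iosize > limit)
			iosize = limit;
		return (iosize & PAGEMASK);
	}

	int
	main(void)
	{
		/* 4 GB of memory, 1 MB maxphys: 1 MB buffer */
		printf("%zu\n", iosize_for((size_t)1 << 20, DUMP_1MB));
		/* 512 GB of memory, 64 MB device transfer: clamped to 8 MB */
		printf("%zu\n", iosize_for((size_t)1 << 27, 64 * DUMP_1MB));
		return (0);
	}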
463 463
464 464 /*
465 465 * resize the I/O buffer
466 466 */
467 467 static void
468 468 dumpbuf_resize(void)
469 469 {
470 470 char *old_buf = dumpbuf.start;
471 471 size_t old_size = dumpbuf.size;
472 472 char *new_buf;
473 473 size_t new_size;
474 474
475 475 ASSERT(MUTEX_HELD(&dump_lock));
476 476
477 477 new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys));
478 478 if (new_size <= old_size)
479 479 return; /* no need to reallocate buffer */
480 480
481 481 new_buf = kmem_alloc(new_size, KM_SLEEP);
482 482 dumpbuf.size = new_size;
483 483 dumpbuf.start = new_buf;
484 484 dumpbuf.end = new_buf + new_size;
485 485 kmem_free(old_buf, old_size);
486 486 }
487 487
488 488 /*
489 489 * dump_update_clevel is called when dumpadm configures the dump device.
490 490 * Calculate number of helpers and buffers.
491 491 * Allocate the minimum configuration for now.
492 492 *
493 493 * When the dump file is configured we reserve a minimum amount of
494 494 * memory for use at crash time. But we reserve VA for all the memory
495 495 * we really want in order to do the fastest dump possible. The VA is
496 496 * backed by pages not being dumped, according to the bitmap. If
497 497 * there is insufficient spare memory, however, we fall back to the
498 498 * minimum.
499 499 *
500 500 * Live dump (savecore -L) always uses the minimum config.
501 501 *
502 502 * clevel 0 is single threaded lzjb
503 503 * clevel 1 is parallel lzjb
504 504 * clevel 2 is parallel bzip2
505 505 *
506 506 * The ncpu threshold is selected with dump_plat_mincpu.
507 507 * On OPL, set_platform_defaults() overrides the sun4u setting.
508 508 * The actual values are defined via DUMP_PLAT_*_MINCPU macros.
509 509 *
510 510 * Architecture Threshold Algorithm
511 511 * sun4u < 51 parallel lzjb
512 512 * sun4u >= 51 parallel bzip2(*)
513 513 * sun4u OPL < 8 parallel lzjb
514 514 * sun4u OPL >= 8 parallel bzip2(*)
515 515 * sun4v < 128 parallel lzjb
516 516 * sun4v >= 128 parallel bzip2(*)
517 517 * x86 < 11 parallel lzjb
518 518 * x86 >= 11 parallel bzip2(*)
519 519 * 32-bit N/A single-threaded lzjb
520 520 *
521 521 * (*) bzip2 is only chosen if there is sufficient available
522 522 * memory for buffers at dump time. See dumpsys_get_maxmem().
523 523 *
524 524 * Faster dump devices have larger I/O buffers. The threshold value is
525 525 * increased according to the size of the dump I/O buffer, because
526 526 * parallel lzjb performs better with faster disks. For buffers >= 1MB
527 527 * the threshold is 3X; for buffers >= 256K threshold is 2X.
528 528 *
529 529 * For parallel dumps, the number of helpers is ncpu-1. The CPU
530 530 * running panic runs the main task. For single-threaded dumps, the
531 531 * panic CPU does lzjb compression (it is tagged as MAINHELPER.)
532 532 *
533 533 * Need multiple buffers per helper so that they do not block waiting
534 534 * for the main task.
535 535 * parallel single-threaded
536 536 * Number of output buffers: nhelper*2 1
537 537 * Number of mapping buffers: nhelper*4 1
538 538 *
539 539 */
540 540 static void
541 541 dump_update_clevel()
542 542 {
543 543 int tag;
544 544 size_t bz2size;
545 545 helper_t *hp, *hpend;
546 546 cbuf_t *cp, *cpend;
547 547 dumpcfg_t *old = &dumpcfg;
548 548 dumpcfg_t newcfg = *old;
549 549 dumpcfg_t *new = &newcfg;
550 550
551 551 ASSERT(MUTEX_HELD(&dump_lock));
552 552
553 553 /*
554 554 * Free the previously allocated bufs and VM.
555 555 */
556 556 if (old->helper != NULL) {
557 557
558 558 /* helpers */
559 559 hpend = &old->helper[old->nhelper];
560 560 for (hp = old->helper; hp != hpend; hp++) {
561 561 if (hp->lzbuf != NULL)
562 562 kmem_free(hp->lzbuf, PAGESIZE);
563 563 if (hp->page != NULL)
564 564 kmem_free(hp->page, PAGESIZE);
565 565 }
566 566 kmem_free(old->helper, old->nhelper * sizeof (helper_t));
567 567
568 568 /* VM space for mapping pages */
569 569 cpend = &old->cmap[old->ncmap];
570 570 for (cp = old->cmap; cp != cpend; cp++)
571 571 vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE);
572 572 kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t));
573 573
574 574 /* output bufs */
575 575 cpend = &old->cbuf[old->ncbuf];
576 576 for (cp = old->cbuf; cp != cpend; cp++)
577 577 if (cp->buf != NULL)
578 578 kmem_free(cp->buf, cp->size);
579 579 kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t));
580 580
581 581 /* reserved VM for dumpsys_get_maxmem */
582 582 if (old->maxvmsize > 0)
583 583 vmem_xfree(heap_arena, old->maxvm, old->maxvmsize);
584 584 }
585 585
586 586 /*
587 587 * Allocate memory and VM.
588 588 * One CPU runs dumpsys, the rest are helpers.
589 589 */
590 590 new->nhelper = ncpus - 1;
591 591 if (new->nhelper < 1)
592 592 new->nhelper = 1;
593 593
594 594 if (new->nhelper > DUMP_MAX_NHELPER)
595 595 new->nhelper = DUMP_MAX_NHELPER;
596 596
597 597 /* use platform default, unless /etc/system overrides */
598 598 if (dump_plat_mincpu == MINCPU_NOT_SET)
599 599 dump_plat_mincpu = dump_plat_mincpu_default;
600 600
601 601 /* increase threshold for faster disks */
602 602 new->threshold = dump_plat_mincpu;
603 603 if (dumpbuf.iosize >= DUMP_1MB)
604 604 new->threshold *= 3;
605 605 else if (dumpbuf.iosize >= (256 * DUMP_1KB))
606 606 new->threshold *= 2;
607 607
608 608 /* figure compression level based upon the computed threshold. */
609 609 if (dump_plat_mincpu == 0 || new->nhelper < 2) {
610 610 new->clevel = 0;
611 611 new->nhelper = 1;
612 612 } else if ((new->nhelper + 1) >= new->threshold) {
613 613 new->clevel = DUMP_CLEVEL_BZIP2;
614 614 } else {
615 615 new->clevel = DUMP_CLEVEL_LZJB;
616 616 }
617 617
618 618 if (new->clevel == 0) {
619 619 new->ncbuf = 1;
620 620 new->ncmap = 1;
621 621 } else {
622 622 new->ncbuf = NCBUF_PER_HELPER * new->nhelper;
623 623 new->ncmap = NCMAP_PER_HELPER * new->nhelper;
624 624 }
625 625
626 626 /*
627 627 * Allocate new data structures and buffers for MINHELPERS,
628 628 * and also figure the max desired size.
629 629 */
630 630 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
631 631 new->maxsize = 0;
632 632 new->maxvmsize = 0;
633 633 new->maxvm = NULL;
634 634 tag = 1;
635 635 new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP);
636 636 hpend = &new->helper[new->nhelper];
637 637 for (hp = new->helper; hp != hpend; hp++) {
638 638 hp->tag = tag++;
639 639 if (hp < &new->helper[MINHELPERS]) {
640 640 hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
641 641 hp->page = kmem_alloc(PAGESIZE, KM_SLEEP);
642 642 } else if (new->clevel < DUMP_CLEVEL_BZIP2) {
643 643 new->maxsize += 2 * PAGESIZE;
644 644 } else {
645 645 new->maxsize += PAGESIZE;
646 646 }
647 647 if (new->clevel >= DUMP_CLEVEL_BZIP2)
648 648 new->maxsize += bz2size;
649 649 }
650 650
651 651 new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP);
652 652 cpend = &new->cbuf[new->ncbuf];
653 653 for (cp = new->cbuf; cp != cpend; cp++) {
654 654 cp->state = CBUF_FREEBUF;
655 655 cp->size = CBUF_SIZE;
656 656 if (cp < &new->cbuf[MINCBUFS])
657 657 cp->buf = kmem_alloc(cp->size, KM_SLEEP);
658 658 else
659 659 new->maxsize += cp->size;
660 660 }
661 661
662 662 new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP);
663 663 cpend = &new->cmap[new->ncmap];
664 664 for (cp = new->cmap; cp != cpend; cp++) {
665 665 cp->state = CBUF_FREEMAP;
666 666 cp->size = CBUF_MAPSIZE;
667 667 cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE,
668 668 0, 0, NULL, NULL, VM_SLEEP);
669 669 }
670 670
671 671 /* reserve VA to be backed with spare pages at crash time */
672 672 if (new->maxsize > 0) {
673 673 new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE);
674 674 new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE);
675 675 new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize,
676 676 CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP);
677 677 }
678 678
679 679 /*
680 680 * Reserve memory for kmem allocation calls made during crash
681 681 * dump. The hat layer allocates memory for each mapping
682 682 * created, and the I/O path allocates buffers and data structs.
683 683 * Add a few pages for safety.
684 684 */
685 685 kmem_dump_init((new->ncmap * dump_kmem_permap) +
686 686 (dump_kmem_pages * PAGESIZE));
687 687
688 688 /* set new config pointers */
689 689 *old = *new;
690 690 }
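As a worked example of the threshold logic above (using the x86 default of 11 from the earlier table): a 16-CPU x86 machine with a dump device whose optimal I/O size is 1 MB gets nhelper = 15 and threshold = 11 * 3 = 33; since nhelper + 1 = 16 is below 33, the configuration stays at parallel lzjb. The same machine on a slow device (threshold left at 11) would select parallel bzip2 instead, subject to the memory check in dumpsys_get_maxmem() at panic time.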
691 691
692 692 /*
693 693 * Define a struct memlist walker to optimize bitnum to pfn
694 694 * lookup. The walker maintains the state of the list traversal.
695 695 */
696 696 typedef struct dumpmlw {
697 697 struct memlist *mp; /* current memlist */
698 698 pgcnt_t basenum; /* bitnum base offset */
699 699 pgcnt_t mppages; /* current memlist size */
700 700 pgcnt_t mpleft; /* size to end of current memlist */
701 701 pfn_t mpaddr; /* first pfn in memlist */
702 702 } dumpmlw_t;
703 703
704 704 /* initialize the walker */
705 705 static inline void
706 706 dump_init_memlist_walker(dumpmlw_t *pw)
707 707 {
708 708 pw->mp = phys_install;
709 709 pw->basenum = 0;
710 710 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
711 711 pw->mpleft = pw->mppages;
712 712 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
713 713 }
714 714
715 715 /*
716 716 * Lookup pfn given bitnum. The memlist can be quite long on some
717 717 * systems (e.g.: one per board). To optimize sequential lookups, the
718 718 * caller initializes and presents a memlist walker.
719 719 */
720 720 static pfn_t
721 721 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
722 722 {
723 723 bitnum -= pw->basenum;
724 724 while (pw->mp != NULL) {
725 725 if (bitnum < pw->mppages) {
726 726 pw->mpleft = pw->mppages - bitnum;
727 727 return (pw->mpaddr + bitnum);
728 728 }
729 729 bitnum -= pw->mppages;
730 730 pw->basenum += pw->mppages;
731 731 pw->mp = pw->mp->ml_next;
732 732 if (pw->mp != NULL) {
733 733 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
734 734 pw->mpleft = pw->mppages;
735 735 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
736 736 }
737 737 }
738 738 return (PFN_INVALID);
739 739 }
740 740
741 741 static pgcnt_t
742 742 dump_pfn_to_bitnum(pfn_t pfn)
743 743 {
744 744 struct memlist *mp;
745 745 pgcnt_t bitnum = 0;
746 746
747 747 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
748 748 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
749 749 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
750 750 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
751 751 bitnum += mp->ml_size >> PAGESHIFT;
752 752 }
753 753 return ((pgcnt_t)-1);
754 754 }
755 755
756 756 /*
757 757 * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The
758 758 * mapping of pfn to range index is imperfect because pfn and bitnum
759 759 * do not have the same phase. To make sure a CBUF_MAPSIZE range is
760 760 * covered, call this for both ends:
761 761 * dump_set_used(base)
762 762 * dump_set_used(base+CBUF_MAPNP-1)
763 763 *
764 764 * This is used during a panic dump to mark pages allocated by
765 765 * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by
766 766 * page_get_mnode_freelist() to make sure pages used by dump are never
767 767 * allocated.
768 768 */
769 769 #define CBUF_MAPP2R(pfn) ((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT))
770 770
771 771 static void
772 772 dump_set_used(pfn_t pfn)
773 773 {
774 774
775 775 pgcnt_t bitnum, rbitnum;
776 776
777 777 bitnum = dump_pfn_to_bitnum(pfn);
778 778 ASSERT(bitnum != (pgcnt_t)-1);
779 779
780 780 rbitnum = CBUF_MAPP2R(bitnum);
781 781 ASSERT(rbitnum < dumpcfg.rbitmapsize);
782 782
783 783 BT_SET(dumpcfg.rbitmap, rbitnum);
784 784 }
785 785
786 786 int
787 787 dump_test_used(pfn_t pfn)
788 788 {
789 789 pgcnt_t bitnum, rbitnum;
790 790
791 791 bitnum = dump_pfn_to_bitnum(pfn);
792 792 ASSERT(bitnum != (pgcnt_t)-1);
793 793
794 794 rbitnum = CBUF_MAPP2R(bitnum);
795 795 ASSERT(rbitnum < dumpcfg.rbitmapsize);
796 796
797 797 return (BT_TEST(dumpcfg.rbitmap, rbitnum));
798 798 }
799 799
800 800 /*
801 801 * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library.
802 802 * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit().
803 803 */
804 804 static void *
805 805 dumpbzalloc(void *opaque, int items, int size)
806 806 {
807 807 size_t *sz;
808 808 char *ret;
809 809
810 810 ASSERT(opaque != NULL);
811 811 sz = opaque;
812 812 ret = dumpcfg.maxvm + *sz;
813 813 *sz += items * size;
814 814 *sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN);
815 815 ASSERT(*sz <= dumpcfg.maxvmsize);
816 816 return (ret);
817 817 }
818 818
819 819 /*ARGSUSED*/
820 820 static void
821 821 dumpbzfree(void *opaque, void *addr)
822 822 {
823 823 }
824 824
825 825 /*
826 826 * Perform additional checks on the page to see if we can really use
827 827 * it. The kernel (kas) pages are always set in the bitmap. However,
828 828 * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the
829 829 * bitmap. So we check for them.
830 830 */
831 831 static inline int
832 832 dump_pfn_check(pfn_t pfn)
833 833 {
834 834 page_t *pp = page_numtopp_nolock(pfn);
835 835 if (pp == NULL || pp->p_pagenum != pfn ||
836 836 #if defined(__sparc)
837 837 pp->p_vnode == &promvp ||
838 838 #else
839 839 PP_ISBOOTPAGES(pp) ||
840 840 #endif
841 841 pp->p_toxic != 0)
842 842 return (0);
843 843 return (1);
844 844 }
845 845
846 846 /*
847 847 * Check a range to see if all contained pages are available and
848 848 * return non-zero if the range can be used.
849 849 */
850 850 static inline int
851 851 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn)
852 852 {
853 853 for (; start < end; start++, pfn++) {
854 854 if (BT_TEST(dumpcfg.bitmap, start))
855 855 return (0);
856 856 if (!dump_pfn_check(pfn))
857 857 return (0);
858 858 }
859 859 return (1);
860 860 }
861 861
862 862 /*
863 863 * dumpsys_get_maxmem() is called during panic. Find unused ranges
864 864 * and use them for buffers. If we find enough memory switch to
865 865 * parallel bzip2, otherwise use parallel lzjb.
866 866 *
867 867 * It searches the dump bitmap in 2 passes. The first time it looks
868 868 * for CBUF_MAPSIZE ranges. On the second pass it uses small pages.
869 869 */
870 870 static void
871 871 dumpsys_get_maxmem()
872 872 {
873 873 dumpcfg_t *cfg = &dumpcfg;
874 874 cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf];
875 875 helper_t *endhp = &cfg->helper[cfg->nhelper];
876 876 pgcnt_t bitnum, end;
877 877 size_t sz, endsz, bz2size;
878 878 pfn_t pfn, off;
879 879 cbuf_t *cp;
880 880 helper_t *hp, *ohp;
881 881 dumpmlw_t mlw;
882 882 int k;
883 883
884 884 /*
885 885 * Setting dump_plat_mincpu to 0 at any time forces a serial
886 886 * dump.
887 887 */
888 888 if (dump_plat_mincpu == 0) {
889 889 cfg->clevel = 0;
890 890 return;
891 891 }
892 892
893 893 /*
894 894 * There may be no point in looking for spare memory. If
895 895 * dumping all memory, then none is spare. If doing a serial
896 896 * dump, then already have buffers.
897 897 */
898 898 if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB ||
899 899 (dump_conflags & DUMP_ALL) != 0) {
900 900 if (cfg->clevel > DUMP_CLEVEL_LZJB)
901 901 cfg->clevel = DUMP_CLEVEL_LZJB;
902 902 return;
903 903 }
904 904
905 905 sz = 0;
906 906 cfg->found4m = 0;
907 907 cfg->foundsm = 0;
908 908
909 909 /* bitmap of ranges used to estimate which pfns are being used */
910 910 bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize));
911 911
912 912 /* find ranges that are not being dumped to use for buffers */
913 913 dump_init_memlist_walker(&mlw);
914 914 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
915 915 dump_timeleft = dump_timeout;
916 916 end = bitnum + CBUF_MAPNP;
917 917 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
918 918 ASSERT(pfn != PFN_INVALID);
919 919
920 920 /* skip partial range at end of mem segment */
921 921 if (mlw.mpleft < CBUF_MAPNP) {
922 922 end = bitnum + mlw.mpleft;
923 923 continue;
924 924 }
925 925
926 926 /* skip non aligned pages */
927 927 off = P2PHASE(pfn, CBUF_MAPNP);
928 928 if (off != 0) {
929 929 end -= off;
930 930 continue;
931 931 }
932 932
933 933 if (!dump_range_check(bitnum, end, pfn))
934 934 continue;
935 935
936 936 ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize);
937 937 hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn,
938 938 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
939 939 sz += CBUF_MAPSIZE;
940 940 cfg->found4m++;
941 941
942 942 /* set the bitmap for both ends to be sure to cover the range */
943 943 dump_set_used(pfn);
944 944 dump_set_used(pfn + CBUF_MAPNP - 1);
945 945
946 946 if (sz >= cfg->maxsize)
947 947 goto foundmax;
948 948 }
949 949
950 950 /* Add small pages if we can't find enough large pages. */
951 951 dump_init_memlist_walker(&mlw);
952 952 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
953 953 dump_timeleft = dump_timeout;
954 954 end = bitnum + CBUF_MAPNP;
955 955 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
956 956 ASSERT(pfn != PFN_INVALID);
957 957
958 958 /* Find any non-aligned pages at start and end of segment. */
959 959 off = P2PHASE(pfn, CBUF_MAPNP);
960 960 if (mlw.mpleft < CBUF_MAPNP) {
961 961 end = bitnum + mlw.mpleft;
962 962 } else if (off != 0) {
963 963 end -= off;
964 964 } else if (cfg->found4m && dump_test_used(pfn)) {
965 965 continue;
966 966 }
967 967
968 968 for (; bitnum < end; bitnum++, pfn++) {
969 969 dump_timeleft = dump_timeout;
970 970 if (BT_TEST(dumpcfg.bitmap, bitnum))
971 971 continue;
972 972 if (!dump_pfn_check(pfn))
973 973 continue;
974 974 ASSERT((sz + PAGESIZE) <= cfg->maxvmsize);
975 975 hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn,
976 976 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
977 977 sz += PAGESIZE;
978 978 cfg->foundsm++;
979 979 dump_set_used(pfn);
980 980 if (sz >= cfg->maxsize)
981 981 goto foundmax;
982 982 }
983 983 }
984 984
985 985 /* Fall back to lzjb if we did not get enough memory for bzip2. */
986 986 endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper;
987 987 if (sz < endsz) {
988 988 cfg->clevel = DUMP_CLEVEL_LZJB;
989 989 }
990 990
991 991 /* Allocate memory for as many helpers as we can. */
992 992 foundmax:
993 993
994 994 /* Byte offsets into memory found and mapped above */
995 995 endsz = sz;
996 996 sz = 0;
997 997
998 998 /* Set the size for bzip2 state. Only bzip2 needs it. */
999 999 bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
1000 1000
1001 1001 /* Skip the preallocate output buffers. */
1002 1002 cp = &cfg->cbuf[MINCBUFS];
1003 1003
1004 1004 /* Use this to move memory up from the preallocated helpers. */
1005 1005 ohp = cfg->helper;
1006 1006
1007 1007 /* Loop over all helpers and allocate memory. */
1008 1008 for (hp = cfg->helper; hp < endhp; hp++) {
1009 1009
1010 1010 /* Skip preallocated helpers by checking hp->page. */
1011 1011 if (hp->page == NULL) {
1012 1012 if (cfg->clevel <= DUMP_CLEVEL_LZJB) {
1013 1013 /* lzjb needs 2 1-page buffers */
1014 1014 if ((sz + (2 * PAGESIZE)) > endsz)
1015 1015 break;
1016 1016 hp->page = cfg->maxvm + sz;
1017 1017 sz += PAGESIZE;
1018 1018 hp->lzbuf = cfg->maxvm + sz;
1019 1019 sz += PAGESIZE;
1020 1020
1021 1021 } else if (ohp->lzbuf != NULL) {
1022 1022 				/* re-use the preallocated lzjb page for bzip2 */
1023 1023 hp->page = ohp->lzbuf;
1024 1024 ohp->lzbuf = NULL;
1025 1025 ++ohp;
1026 1026
1027 1027 } else {
1028 1028 /* bzip2 needs a 1-page buffer */
1029 1029 if ((sz + PAGESIZE) > endsz)
1030 1030 break;
1031 1031 hp->page = cfg->maxvm + sz;
1032 1032 sz += PAGESIZE;
1033 1033 }
1034 1034 }
1035 1035
1036 1036 /*
1037 1037 * Add output buffers per helper. The number of
1038 1038 * buffers per helper is determined by the ratio of
1039 1039 * ncbuf to nhelper.
1040 1040 */
1041 1041 for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz &&
1042 1042 k < NCBUF_PER_HELPER; k++) {
1043 1043 cp->state = CBUF_FREEBUF;
1044 1044 cp->size = CBUF_SIZE;
1045 1045 cp->buf = cfg->maxvm + sz;
1046 1046 sz += CBUF_SIZE;
1047 1047 ++cp;
1048 1048 }
1049 1049
1050 1050 /*
1051 1051 * bzip2 needs compression state. Use the dumpbzalloc
1052 1052 * and dumpbzfree callbacks to allocate the memory.
1053 1053 * bzip2 does allocation only at init time.
1054 1054 */
1055 1055 if (cfg->clevel >= DUMP_CLEVEL_BZIP2) {
1056 1056 if ((sz + bz2size) > endsz) {
1057 1057 hp->page = NULL;
1058 1058 break;
1059 1059 } else {
1060 1060 hp->bzstream.opaque = &sz;
1061 1061 hp->bzstream.bzalloc = dumpbzalloc;
1062 1062 hp->bzstream.bzfree = dumpbzfree;
1063 1063 (void) BZ2_bzCompressInit(&hp->bzstream,
1064 1064 dump_bzip2_level, 0, 0);
1065 1065 hp->bzstream.opaque = NULL;
1066 1066 }
1067 1067 }
1068 1068 }
1069 1069
1070 1070 /* Finish allocating output buffers */
1071 1071 for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) {
1072 1072 cp->state = CBUF_FREEBUF;
1073 1073 cp->size = CBUF_SIZE;
1074 1074 cp->buf = cfg->maxvm + sz;
1075 1075 sz += CBUF_SIZE;
1076 1076 }
1077 1077
1078 1078 /* Enable IS_DUMP_PAGE macro, which checks for pages we took. */
1079 1079 if (cfg->found4m || cfg->foundsm)
1080 1080 dump_check_used = 1;
1081 1081
1082 1082 ASSERT(sz <= endsz);
1083 1083 }
1084 1084
1085 1085 static void
1086 1086 dumphdr_init(void)
1087 1087 {
1088 1088 pgcnt_t npages = 0;
1089 1089
1090 1090 ASSERT(MUTEX_HELD(&dump_lock));
1091 1091
1092 1092 if (dumphdr == NULL) {
1093 1093 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
1094 1094 dumphdr->dump_magic = DUMP_MAGIC;
1095 1095 dumphdr->dump_version = DUMP_VERSION;
1096 1096 dumphdr->dump_wordsize = DUMP_WORDSIZE;
1097 1097 dumphdr->dump_pageshift = PAGESHIFT;
1098 1098 dumphdr->dump_pagesize = PAGESIZE;
1099 1099 dumphdr->dump_utsname = utsname;
1100 1100 (void) strcpy(dumphdr->dump_platform, platform);
1101 1101 dumpbuf.size = dumpbuf_iosize(maxphys);
1102 1102 dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP);
1103 1103 dumpbuf.end = dumpbuf.start + dumpbuf.size;
1104 1104 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
1105 1105 dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP);
1106 1106 LOCK_INIT_HELD(&dumpcfg.helper_lock);
1107 1107 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
1108 1108 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
1109 1109 sizeof (dumphdr->dump_uuid));
1110 1110 }
1111 1111
1112 1112 npages = num_phys_pages();
1113 1113
1114 1114 if (dumpcfg.bitmapsize != npages) {
1115 1115 size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP));
1116 1116 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
1117 1117 void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP);
1118 1118
1119 1119 if (dumpcfg.bitmap != NULL)
1120 1120 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
1121 1121 bitmapsize));
1122 1122 if (dumpcfg.rbitmap != NULL)
1123 1123 kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.
1124 1124 rbitmapsize));
1125 1125 dumpcfg.bitmap = map;
1126 1126 dumpcfg.bitmapsize = npages;
1127 1127 dumpcfg.rbitmap = rmap;
1128 1128 dumpcfg.rbitmapsize = rlen;
1129 1129 }
1130 1130 }
1131 1131
1132 1132 /*
1133 1133 * Establish a new dump device.
1134 1134 */
1135 1135 int
1136 1136 dumpinit(vnode_t *vp, char *name, int justchecking)
1137 1137 {
1138 1138 vnode_t *cvp;
1139 1139 vattr_t vattr;
1140 1140 vnode_t *cdev_vp;
1141 1141 int error = 0;
1142 1142
1143 1143 ASSERT(MUTEX_HELD(&dump_lock));
1144 1144
1145 1145 dumphdr_init();
1146 1146
1147 1147 cvp = common_specvp(vp);
1148 1148 if (cvp == dumpvp)
1149 1149 return (0);
1150 1150
1151 1151 /*
1152 1152 * Determine whether this is a plausible dump device. We want either:
1153 1153 * (1) a real device that's not mounted and has a cb_dump routine, or
1154 1154 * (2) a swapfile on some filesystem that has a vop_dump routine.
1155 1155 */
1156 1156 if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
1157 1157 return (error);
1158 1158
1159 1159 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
1160 1160 if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) {
1161 1161 if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
1162 1162 if (devopsp[getmajor(vattr.va_rdev)]->
1163 1163 devo_cb_ops->cb_dump == nodev)
1164 1164 error = ENOTSUP;
1165 1165 else if (vfs_devismounted(vattr.va_rdev))
1166 1166 error = EBUSY;
1167 1167 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
1168 1168 ZFS_DRIVER) == 0 &&
1169 1169 IS_SWAPVP(common_specvp(cvp)))
1170 1170 error = EBUSY;
1171 1171 } else {
1172 1172 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
1173 1173 !IS_SWAPVP(cvp))
1174 1174 error = ENOTSUP;
1175 1175 }
1176 1176 }
1177 1177
1178 1178 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
1179 1179 error = ENOSPC;
1180 1180
1181 1181 if (error || justchecking) {
1182 1182 (void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0,
1183 1183 kcred, NULL);
1184 1184 return (error);
1185 1185 }
1186 1186
1187 1187 VN_HOLD(cvp);
1188 1188
1189 1189 if (dumpvp != NULL)
1190 1190 dumpfini(); /* unconfigure the old dump device */
1191 1191
1192 1192 dumpvp = cvp;
1193 1193 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1194 1194 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
1195 1195 (void) strcpy(dumppath, name);
1196 1196 dumpbuf.iosize = 0;
1197 1197
1198 1198 /*
1199 1199 * If the dump device is a block device, attempt to open up the
1200 1200 * corresponding character device and determine its maximum transfer
1201 1201 * size. We use this information to potentially resize dumpbuf to a
1202 1202 * larger and more optimal size for performing i/o to the dump device.
1203 1203 */
1204 1204 if (cvp->v_type == VBLK &&
1205 1205 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
1206 1206 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1207 1207 size_t blk_size;
1208 1208 struct dk_cinfo dki;
1209 1209 struct dk_minfo minf;
1210 1210
1211 1211 if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO,
1212 1212 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
1213 1213 == 0 && minf.dki_lbsize != 0)
1214 1214 blk_size = minf.dki_lbsize;
1215 1215 else
1216 1216 blk_size = DEV_BSIZE;
1217 1217
1218 1218 if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki,
1219 1219 FKIOCTL, kcred, NULL, NULL) == 0) {
1220 1220 dumpbuf.iosize = dki.dki_maxtransfer * blk_size;
1221 1221 dumpbuf_resize();
1222 1222 }
1223 1223 /*
1224 1224 * If we are working with a zvol then dumpify it
1225 1225 * if it's not being used as swap.
1226 1226 */
1227 1227 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
1228 1228 if (IS_SWAPVP(common_specvp(cvp)))
1229 1229 error = EBUSY;
1230 1230 else if ((error = VOP_IOCTL(cdev_vp,
1231 1231 DKIOCDUMPINIT, NULL, FKIOCTL, kcred,
1232 1232 NULL, NULL)) != 0)
1233 1233 dumpfini();
1234 1234 }
1235 1235
1236 1236 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1237 1237 kcred, NULL);
1238 1238 }
1239 1239
1240 1240 VN_RELE(cdev_vp);
1241 1241 }
1242 1242
1243 1243 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
1244 1244
1245 1245 dump_update_clevel();
1246 1246
1247 1247 return (error);
1248 1248 }
1249 1249
1250 1250 void
1251 1251 dumpfini(void)
1252 1252 {
1253 1253 vattr_t vattr;
1254 1254 boolean_t is_zfs = B_FALSE;
1255 1255 vnode_t *cdev_vp;
1256 1256 ASSERT(MUTEX_HELD(&dump_lock));
1257 1257
1258 1258 kmem_free(dumppath, strlen(dumppath) + 1);
1259 1259
1260 1260 /*
1261 1261 * Determine if we are using zvols for our dump device
1262 1262 */
1263 1263 vattr.va_mask = AT_RDEV;
1264 1264 if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) {
1265 1265 is_zfs = (getmajor(vattr.va_rdev) ==
1266 1266 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
1267 1267 }
1268 1268
1269 1269 /*
1270 1270 * If we have a zvol dump device then we call into zfs so
1271 1271 * that it may have a chance to cleanup.
1272 1272 */
1273 1273 if (is_zfs &&
1274 1274 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
1275 1275 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1276 1276 (void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL,
1277 1277 kcred, NULL, NULL);
1278 1278 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1279 1279 kcred, NULL);
1280 1280 }
1281 1281 VN_RELE(cdev_vp);
1282 1282 }
1283 1283
1284 1284 (void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
1285 1285
1286 1286 VN_RELE(dumpvp);
1287 1287
1288 1288 dumpvp = NULL;
1289 1289 dumpvp_size = 0;
1290 1290 dumppath = NULL;
1291 1291 }
1292 1292
1293 1293 static offset_t
1294 1294 dumpvp_flush(void)
1295 1295 {
1296 1296 size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE);
1297 1297 hrtime_t iotime;
1298 1298 int err;
1299 1299
1300 1300 if (dumpbuf.vp_off + size > dumpbuf.vp_limit) {
1301 1301 dump_ioerr = ENOSPC;
1302 1302 dumpbuf.vp_off = dumpbuf.vp_limit;
1303 1303 } else if (size != 0) {
1304 1304 iotime = gethrtime();
1305 1305 dumpsync.iowait += iotime - dumpsync.iowaitts;
1306 1306 if (panicstr)
1307 1307 err = VOP_DUMP(dumpvp, dumpbuf.start,
1308 1308 lbtodb(dumpbuf.vp_off), btod(size), NULL);
1309 1309 else
1310 1310 err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ?
1311 1311 dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size,
1312 1312 dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit,
1313 1313 kcred, 0);
1314 1314 if (err && dump_ioerr == 0)
1315 1315 dump_ioerr = err;
1316 1316 dumpsync.iowaitts = gethrtime();
1317 1317 dumpsync.iotime += dumpsync.iowaitts - iotime;
1318 1318 dumpsync.nwrite += size;
1319 1319 dumpbuf.vp_off += size;
1320 1320 }
1321 1321 dumpbuf.cur = dumpbuf.start;
1322 1322 dump_timeleft = dump_timeout;
1323 1323 return (dumpbuf.vp_off);
1324 1324 }
1325 1325
1326 1326 /* maximize write speed by keeping seek offset aligned with size */
1327 1327 void
1328 1328 dumpvp_write(const void *va, size_t size)
1329 1329 {
1330 1330 size_t len, off, sz;
1331 1331
1332 1332 while (size != 0) {
1333 1333 len = MIN(size, dumpbuf.end - dumpbuf.cur);
1334 1334 if (len == 0) {
1335 1335 off = P2PHASE(dumpbuf.vp_off, dumpbuf.size);
1336 1336 if (off == 0 || !ISP2(dumpbuf.size)) {
1337 1337 (void) dumpvp_flush();
1338 1338 } else {
1339 1339 sz = dumpbuf.size - off;
1340 1340 dumpbuf.cur = dumpbuf.start + sz;
1341 1341 (void) dumpvp_flush();
1342 1342 ovbcopy(dumpbuf.start + sz, dumpbuf.start, off);
1343 1343 dumpbuf.cur += off;
1344 1344 }
1345 1345 } else {
1346 1346 bcopy(va, dumpbuf.cur, len);
1347 1347 va = (char *)va + len;
1348 1348 dumpbuf.cur += len;
1349 1349 size -= len;
1350 1350 }
1351 1351 }
1352 1352 }
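For example, if dumpbuf.size is 8 MB but the device offset is 1 MB past an 8 MB boundary when the buffer first fills, the code above flushes only the leading 7 MB, slides the remaining 1 MB down to the start of the buffer, and from then on every flush is a full 8 MB write beginning on an 8 MB-aligned offset.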
1353 1353
1354 1354 /*ARGSUSED*/
1355 1355 static void
1356 1356 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
1357 1357 {
1358 1358 dumpvp_write(src, size);
1359 1359 }
1360 1360
1361 1361 /*
1362 1362 * Mark 'pfn' in the bitmap and dump its translation table entry.
1363 1363 */
1364 1364 void
1365 1365 dump_addpage(struct as *as, void *va, pfn_t pfn)
1366 1366 {
1367 1367 mem_vtop_t mem_vtop;
1368 1368 pgcnt_t bitnum;
1369 1369
1370 1370 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1371 1371 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1372 1372 dumphdr->dump_npages++;
1373 1373 BT_SET(dumpcfg.bitmap, bitnum);
1374 1374 }
1375 1375 dumphdr->dump_nvtop++;
1376 1376 mem_vtop.m_as = as;
1377 1377 mem_vtop.m_va = va;
1378 1378 mem_vtop.m_pfn = pfn;
1379 1379 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
1380 1380 }
1381 1381 dump_timeleft = dump_timeout;
1382 1382 }
1383 1383
1384 1384 /*
1385 1385 * Mark 'pfn' in the bitmap
1386 1386 */
1387 1387 void
1388 1388 dump_page(pfn_t pfn)
1389 1389 {
1390 1390 pgcnt_t bitnum;
1391 1391
1392 1392 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1393 1393 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1394 1394 dumphdr->dump_npages++;
1395 1395 BT_SET(dumpcfg.bitmap, bitnum);
1396 1396 }
1397 1397 }
1398 1398 dump_timeleft = dump_timeout;
1399 1399 }
1400 1400
1401 1401 /*
1402 1402 * Dump the <as, va, pfn> information for a given address space.
1403 1403 * SEGOP_DUMP() will call dump_addpage() for each page in the segment.
1404 1404 */
1405 1405 static void
1406 1406 dump_as(struct as *as)
1407 1407 {
1408 1408 struct seg *seg;
1409 1409
1410 - AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
1410 + AS_LOCK_ENTER(as, RW_READER);
1411 1411 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
1412 1412 if (seg->s_as != as)
1413 1413 break;
1414 1414 if (seg->s_ops == NULL)
1415 1415 continue;
1416 1416 SEGOP_DUMP(seg);
1417 1417 }
1418 - AS_LOCK_EXIT(as, &as->a_lock);
1418 + AS_LOCK_EXIT(as);
1419 1419
1420 1420 if (seg != NULL)
1421 1421 cmn_err(CE_WARN, "invalid segment %p in address space %p",
1422 1422 (void *)seg, (void *)as);
1423 1423 }
1424 1424
1425 1425 static int
1426 1426 dump_process(pid_t pid)
1427 1427 {
1428 1428 proc_t *p = sprlock(pid);
1429 1429
1430 1430 if (p == NULL)
1431 1431 return (-1);
1432 1432 if (p->p_as != &kas) {
1433 1433 mutex_exit(&p->p_lock);
1434 1434 dump_as(p->p_as);
1435 1435 mutex_enter(&p->p_lock);
1436 1436 }
1437 1437
1438 1438 sprunlock(p);
1439 1439
1440 1440 return (0);
1441 1441 }
1442 1442
1443 1443 /*
1444 1444 * The following functions (dump_summary(), dump_ereports(), and
1445 1445 * dump_messages()), write data to an uncompressed area within the
1446 1446 * crashdump. The layout of these is
1447 1447 *
1448 1448 * +------------------------------------------------------------+
1449 1449 * | compressed pages | summary | ereports | messages |
1450 1450 * +------------------------------------------------------------+
1451 1451 *
1452 1452 * With the advent of saving a compressed crash dump by default, we
1453 1453 * need to save a little more data to describe the failure mode in
1454 1454 * an uncompressed buffer available before savecore uncompresses
1455 1455 * the dump. Initially this is a copy of the stack trace. Additional
1456 1456 * summary information should be added here.
1457 1457 */
1458 1458
1459 1459 void
1460 1460 dump_summary(void)
1461 1461 {
1462 1462 u_offset_t dumpvp_start;
1463 1463 summary_dump_t sd;
1464 1464
1465 1465 if (dumpvp == NULL || dumphdr == NULL)
1466 1466 return;
1467 1467
1468 1468 dumpbuf.cur = dumpbuf.start;
1469 1469
1470 1470 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
1471 1471 DUMP_ERPTSIZE);
1472 1472 dumpvp_start = dumpbuf.vp_limit - DUMP_SUMMARYSIZE;
1473 1473 dumpbuf.vp_off = dumpvp_start;
1474 1474
1475 1475 sd.sd_magic = SUMMARY_MAGIC;
1476 1476 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
1477 1477 dumpvp_write(&sd, sizeof (sd));
1478 1478 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);
1479 1479
1480 1480 sd.sd_magic = 0; /* indicate end of summary */
1481 1481 dumpvp_write(&sd, sizeof (sd));
1482 1482 (void) dumpvp_flush();
1483 1483 }
1484 1484
1485 1485 void
1486 1486 dump_ereports(void)
1487 1487 {
1488 1488 u_offset_t dumpvp_start;
1489 1489 erpt_dump_t ed;
1490 1490
1491 1491 if (dumpvp == NULL || dumphdr == NULL)
1492 1492 return;
1493 1493
1494 1494 dumpbuf.cur = dumpbuf.start;
1495 1495 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
1496 1496 dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE;
1497 1497 dumpbuf.vp_off = dumpvp_start;
1498 1498
1499 1499 fm_ereport_dump();
1500 1500 if (panicstr)
1501 1501 errorq_dump();
1502 1502
1503 1503 bzero(&ed, sizeof (ed)); /* indicate end of ereports */
1504 1504 dumpvp_write(&ed, sizeof (ed));
1505 1505 (void) dumpvp_flush();
1506 1506
1507 1507 if (!panicstr) {
1508 1508 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1509 1509 (size_t)(dumpbuf.vp_off - dumpvp_start),
1510 1510 B_INVAL | B_FORCE, kcred, NULL);
1511 1511 }
1512 1512 }
1513 1513
1514 1514 void
1515 1515 dump_messages(void)
1516 1516 {
1517 1517 log_dump_t ld;
1518 1518 mblk_t *mctl, *mdata;
1519 1519 queue_t *q, *qlast;
1520 1520 u_offset_t dumpvp_start;
1521 1521
1522 1522 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
1523 1523 return;
1524 1524
1525 1525 dumpbuf.cur = dumpbuf.start;
1526 1526 dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET;
1527 1527 dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE;
1528 1528 dumpbuf.vp_off = dumpvp_start;
1529 1529
1530 1530 qlast = NULL;
1531 1531 do {
1532 1532 for (q = log_consq; q->q_next != qlast; q = q->q_next)
1533 1533 continue;
1534 1534 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
1535 1535 dump_timeleft = dump_timeout;
1536 1536 mdata = mctl->b_cont;
1537 1537 ld.ld_magic = LOG_MAGIC;
1538 1538 ld.ld_msgsize = MBLKL(mctl->b_cont);
1539 1539 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
1540 1540 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
1541 1541 dumpvp_write(&ld, sizeof (ld));
1542 1542 dumpvp_write(mctl->b_rptr, MBLKL(mctl));
1543 1543 dumpvp_write(mdata->b_rptr, MBLKL(mdata));
1544 1544 }
1545 1545 } while ((qlast = q) != log_consq);
1546 1546
1547 1547 ld.ld_magic = 0; /* indicate end of messages */
1548 1548 dumpvp_write(&ld, sizeof (ld));
1549 1549 (void) dumpvp_flush();
1550 1550 if (!panicstr) {
1551 1551 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1552 1552 (size_t)(dumpbuf.vp_off - dumpvp_start),
1553 1553 B_INVAL | B_FORCE, kcred, NULL);
1554 1554 }
1555 1555 }
1556 1556
1557 1557 /*
1558 1558 * The following functions are called on multiple CPUs during dump.
1559 1559 * They must not use most kernel services, because all cross-calls are
1560 1560 * disabled during panic. Therefore, blocking locks and cache flushes
1561 1561 * will not work.
1562 1562 */
1563 1563
1564 1564 /*
1565 1565 * Copy pages, trapping ECC errors. Also, for robustness, trap data
1566 1566 * access in case something goes wrong in the hat layer and the
1567 1567 * mapping is broken.
1568 1568 */
1569 1569 static int
1570 1570 dump_pagecopy(void *src, void *dst)
1571 1571 {
1572 1572 long *wsrc = (long *)src;
1573 1573 long *wdst = (long *)dst;
1574 1574 const ulong_t ncopies = PAGESIZE / sizeof (long);
1575 1575 volatile int w = 0;
1576 1576 volatile int ueoff = -1;
1577 1577 on_trap_data_t otd;
1578 1578
1579 1579 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
1580 1580 if (ueoff == -1)
1581 1581 ueoff = w * sizeof (long);
1582 1582 /* report "bad ECC" or "bad address" */
1583 1583 #ifdef _LP64
1584 1584 if (otd.ot_trap & OT_DATA_EC)
1585 1585 wdst[w++] = 0x00badecc00badecc;
1586 1586 else
1587 1587 wdst[w++] = 0x00badadd00badadd;
1588 1588 #else
1589 1589 if (otd.ot_trap & OT_DATA_EC)
1590 1590 wdst[w++] = 0x00badecc;
1591 1591 else
1592 1592 wdst[w++] = 0x00badadd;
1593 1593 #endif
1594 1594 }
1595 1595 while (w < ncopies) {
1596 1596 wdst[w] = wsrc[w];
1597 1597 w++;
1598 1598 }
1599 1599 no_trap();
1600 1600 return (ueoff);
1601 1601 }
1602 1602
1603 1603 static void
1604 1604 dumpsys_close_cq(cqueue_t *cq, int live)
1605 1605 {
1606 1606 if (live) {
1607 1607 mutex_enter(&cq->mutex);
1608 1608 atomic_dec_uint(&cq->open);
1609 1609 cv_signal(&cq->cv);
1610 1610 mutex_exit(&cq->mutex);
1611 1611 } else {
1612 1612 atomic_dec_uint(&cq->open);
1613 1613 }
1614 1614 }
1615 1615
1616 1616 static inline void
1617 1617 dumpsys_spinlock(lock_t *lp)
1618 1618 {
1619 1619 uint_t backoff = 0;
1620 1620 int loop_count = 0;
1621 1621
1622 1622 while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
1623 1623 if (++loop_count >= ncpus) {
1624 1624 backoff = mutex_lock_backoff(0);
1625 1625 loop_count = 0;
1626 1626 } else {
1627 1627 backoff = mutex_lock_backoff(backoff);
1628 1628 }
1629 1629 mutex_lock_delay(backoff);
1630 1630 }
1631 1631 }
1632 1632
1633 1633 static inline void
1634 1634 dumpsys_spinunlock(lock_t *lp)
1635 1635 {
1636 1636 lock_clear(lp);
1637 1637 }
1638 1638
1639 1639 static inline void
1640 1640 dumpsys_lock(cqueue_t *cq, int live)
1641 1641 {
1642 1642 if (live)
1643 1643 mutex_enter(&cq->mutex);
1644 1644 else
1645 1645 dumpsys_spinlock(&cq->spinlock);
1646 1646 }
1647 1647
1648 1648 static inline void
1649 1649 dumpsys_unlock(cqueue_t *cq, int live, int signal)
1650 1650 {
1651 1651 if (live) {
1652 1652 if (signal)
1653 1653 cv_signal(&cq->cv);
1654 1654 mutex_exit(&cq->mutex);
1655 1655 } else {
1656 1656 dumpsys_spinunlock(&cq->spinlock);
1657 1657 }
1658 1658 }
1659 1659
1660 1660 static void
1661 1661 dumpsys_wait_cq(cqueue_t *cq, int live)
1662 1662 {
1663 1663 if (live) {
1664 1664 cv_wait(&cq->cv, &cq->mutex);
1665 1665 } else {
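		/*
		 * Non-live (panic) case: cv_wait() cannot be used, so drop
		 * the spin lock and busy-wait until a buffer arrives or the
		 * queue is closed, then re-take the lock.
		 */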
1666 1666 dumpsys_spinunlock(&cq->spinlock);
1667 1667 while (cq->open)
1668 1668 if (cq->first)
1669 1669 break;
1670 1670 dumpsys_spinlock(&cq->spinlock);
1671 1671 }
1672 1672 }
1673 1673
1674 1674 static void
1675 1675 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live)
1676 1676 {
1677 1677 if (cp == NULL)
1678 1678 return;
1679 1679
1680 1680 dumpsys_lock(cq, live);
1681 1681
1682 1682 if (cq->ts != 0) {
1683 1683 cq->empty += gethrtime() - cq->ts;
1684 1684 cq->ts = 0;
1685 1685 }
1686 1686
1687 1687 cp->state = newstate;
1688 1688 cp->next = NULL;
1689 1689 if (cq->last == NULL)
1690 1690 cq->first = cp;
1691 1691 else
1692 1692 cq->last->next = cp;
1693 1693 cq->last = cp;
1694 1694
1695 1695 dumpsys_unlock(cq, live, 1);
1696 1696 }
1697 1697
1698 1698 static cbuf_t *
1699 1699 dumpsys_get_cq(cqueue_t *cq, int live)
1700 1700 {
1701 1701 cbuf_t *cp;
1702 1702 hrtime_t now = gethrtime();
1703 1703
1704 1704 dumpsys_lock(cq, live);
1705 1705
1706 1706 /* CONSTCOND */
1707 1707 while (1) {
1708 1708 cp = (cbuf_t *)cq->first;
1709 1709 if (cp == NULL) {
1710 1710 if (cq->open == 0)
1711 1711 break;
1712 1712 dumpsys_wait_cq(cq, live);
1713 1713 continue;
1714 1714 }
1715 1715 cq->first = cp->next;
1716 1716 if (cq->first == NULL) {
1717 1717 cq->last = NULL;
1718 1718 cq->ts = now;
1719 1719 }
1720 1720 break;
1721 1721 }
1722 1722
1723 1723 dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0);
1724 1724 return (cp);
1725 1725 }
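
/*
 * A minimal sketch of the consumer side of the cqueue handshake
 * implemented above, mirroring how the helpers use it later in this
 * file: hold the destination queue open while running, drain the input
 * queue until CQ_GET() returns NULL (closed and empty), and pass each
 * buffer along.  The function name and guard below are hypothetical;
 * the guard is never defined, so this is never compiled.
 */
#ifdef DUMPSUBR_CQ_SKETCH
static void
dumpsys_cq_consumer_sketch(helper_t *hp)
{
	dumpsync_t *ds = hp->ds;	/* the CQ_* macros are assumed to use the local ds */
	cbuf_t *cp;

	CQ_OPEN(mainq);				/* bump mainq open count */
	while ((cp = CQ_GET(helperq)) != NULL) {
		/* ... consume cp->buf, cp->used bytes ... */
		CQ_PUT(mainq, cp, CBUF_USEDMAP);	/* hand buffer on */
	}
	CQ_CLOSE(mainq);			/* drop mainq reference */
}
#endif	/* DUMPSUBR_CQ_SKETCH */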
1726 1726
1727 1727 /*
1728 1728 * Send an error message to the console. If the main task is running,
1729 1729 * just write the message via uprintf. If a helper is running, the
1730 1730 * message has to be put on a queue for the main task. Setting fmt to
1731 1731 * NULL means flush the error message buffer. If fmt is not NULL, just
1732 1732 * add the text to the existing buffer.
1733 1733 */
1734 1734 static void
1735 1735 dumpsys_errmsg(helper_t *hp, const char *fmt, ...)
1736 1736 {
1737 1737 dumpsync_t *ds = hp->ds;
1738 1738 cbuf_t *cp = hp->cperr;
1739 1739 va_list adx;
1740 1740
1741 1741 if (hp->helper == MAINHELPER) {
1742 1742 if (fmt != NULL) {
1743 1743 if (ds->neednl) {
1744 1744 uprintf("\n");
1745 1745 ds->neednl = 0;
1746 1746 }
1747 1747 va_start(adx, fmt);
1748 1748 vuprintf(fmt, adx);
1749 1749 va_end(adx);
1750 1750 }
1751 1751 } else if (fmt == NULL) {
1752 1752 if (cp != NULL) {
1753 1753 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1754 1754 hp->cperr = NULL;
1755 1755 }
1756 1756 } else {
1757 1757 if (hp->cperr == NULL) {
1758 1758 cp = CQ_GET(freebufq);
1759 1759 hp->cperr = cp;
1760 1760 cp->used = 0;
1761 1761 }
1762 1762 va_start(adx, fmt);
1763 1763 cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used,
1764 1764 fmt, adx);
1765 1765 va_end(adx);
1766 1766 if ((cp->used + LOG_MSGSIZE) > cp->size) {
1767 1767 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1768 1768 hp->cperr = NULL;
1769 1769 }
1770 1770 }
1771 1771 }
1772 1772
1773 1773 /*
1774 1774 * Write an output buffer to the dump file. If the main task is
1775 1775 * running, just write the data. If a helper is running, the output is
1776 1776 * placed on a queue for the main task.
1777 1777 */
1778 1778 static void
1779 1779 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used)
1780 1780 {
1781 1781 dumpsync_t *ds = hp->ds;
1782 1782
1783 1783 if (hp->helper == MAINHELPER) {
1784 1784 HRSTART(ds->perpage, write);
1785 1785 dumpvp_write(cp->buf, used);
1786 1786 HRSTOP(ds->perpage, write);
1787 1787 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
1788 1788 } else {
1789 1789 cp->used = used;
1790 1790 CQ_PUT(mainq, cp, CBUF_WRITE);
1791 1791 }
1792 1792 }
1793 1793
1794 1794 /*
1795 1795 * Copy one page within the mapped range. The offset starts at 0 and
1796 1796 * is relative to the first pfn. cp->buf + cp->off is the address of
1797 1797 * the first pfn. If dump_pagecopy returns a UE offset, create an
1798 1798 * error message. Returns the offset to the next pfn in the range
1799 1799 * selected by the bitmap.
1800 1800 */
1801 1801 static int
1802 1802 dumpsys_copy_page(helper_t *hp, int offset)
1803 1803 {
1804 1804 cbuf_t *cp = hp->cpin;
1805 1805 int ueoff;
1806 1806
1807 1807 ASSERT(cp->off + offset + PAGESIZE <= cp->size);
1808 1808 ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum));
1809 1809
1810 1810 ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page);
1811 1811
1812 1812 /* ueoff is the offset in the page to a UE error */
1813 1813 if (ueoff != -1) {
1814 1814 uint64_t pa = ptob(cp->pfn) + offset + ueoff;
1815 1815
1816 1816 dumpsys_errmsg(hp, "cpu %d: memory error at PA 0x%08x.%08x\n",
1817 1817 CPU->cpu_id, (uint32_t)(pa >> 32), (uint32_t)pa);
1818 1818 }
1819 1819
1820 1820 /*
1821 1821 * Advance bitnum and offset to the next input page for the
1822 1822 * next call to this function.
1823 1823 */
1824 1824 offset += PAGESIZE;
1825 1825 cp->bitnum++;
1826 1826 while (cp->off + offset < cp->size) {
1827 1827 if (BT_TEST(dumpcfg.bitmap, cp->bitnum))
1828 1828 break;
1829 1829 offset += PAGESIZE;
1830 1830 cp->bitnum++;
1831 1831 }
1832 1832
1833 1833 return (offset);
1834 1834 }
1835 1835
1836 1836 /*
1837 1837 * Read the helper queue, and copy one mapped page. Return 0 when
1838 1838 * done. Return 1 when a page has been copied into hp->page.
1839 1839 */
1840 1840 static int
1841 1841 dumpsys_sread(helper_t *hp)
1842 1842 {
1843 1843 dumpsync_t *ds = hp->ds;
1844 1844
1845 1845 /* CONSTCOND */
1846 1846 while (1) {
1847 1847
1848 1848 /* Find the next input buffer. */
1849 1849 if (hp->cpin == NULL) {
1850 1850 HRSTART(hp->perpage, inwait);
1851 1851
1852 1852 /* CONSTCOND */
1853 1853 while (1) {
1854 1854 hp->cpin = CQ_GET(helperq);
1855 1855 dump_timeleft = dump_timeout;
1856 1856
1857 1857 /*
1858 1858 * NULL return means the helper queue
1859 1859 * is closed and empty.
1860 1860 */
1861 1861 if (hp->cpin == NULL)
1862 1862 break;
1863 1863
1864 1864 /* Have input, check for dump I/O error. */
1865 1865 if (!dump_ioerr)
1866 1866 break;
1867 1867
1868 1868 /*
1869 1869 * If an I/O error occurs, stay in the
1870 1870 * loop in order to empty the helper
1871 1871 * queue. Return the buffers to the
1872 1872 * main task to unmap and free them.
1873 1873 */
1874 1874 hp->cpin->used = 0;
1875 1875 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1876 1876 }
1877 1877 HRSTOP(hp->perpage, inwait);
1878 1878
1879 1879 /* Stop here when the helper queue is closed. */
1880 1880 if (hp->cpin == NULL)
1881 1881 break;
1882 1882
1883 1883 /* Set offset to 0 to get the first pfn. */
1884 1884 hp->in = 0;
1885 1885
1886 1886 /* Set the total processed to 0 */
1887 1887 hp->used = 0;
1888 1888 }
1889 1889
1890 1890 /* Process the next page. */
1891 1891 if (hp->used < hp->cpin->used) {
1892 1892
1893 1893 /*
1894 1894 * Get the next page from the input buffer and
1895 1895 * return a copy.
1896 1896 */
1897 1897 ASSERT(hp->in != -1);
1898 1898 HRSTART(hp->perpage, copy);
1899 1899 hp->in = dumpsys_copy_page(hp, hp->in);
1900 1900 hp->used += PAGESIZE;
1901 1901 HRSTOP(hp->perpage, copy);
1902 1902 break;
1903 1903
1904 1904 } else {
1905 1905
1906 1906 /*
1907 1907 * Done with the input. Flush the VM and
1908 1908 * return the buffer to the main task.
1909 1909 */
1910 1910 if (panicstr && hp->helper != MAINHELPER)
1911 1911 hat_flush_range(kas.a_hat,
1912 1912 hp->cpin->buf, hp->cpin->size);
1913 1913 dumpsys_errmsg(hp, NULL);
1914 1914 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1915 1915 hp->cpin = NULL;
1916 1916 }
1917 1917 }
1918 1918
1919 1919 return (hp->cpin != NULL);
1920 1920 }
1921 1921
1922 1922 /*
1923 1923 * Compress size bytes starting at buf with bzip2
1924 1924 * mode:
1925 1925 * BZ_RUN add one more compressed page
1926 1926 * BZ_FINISH no more input, flush the state
1927 1927 */
1928 1928 static void
1929 1929 dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode)
1930 1930 {
1931 1931 dumpsync_t *ds = hp->ds;
1932 1932 const int CSIZE = sizeof (dumpcsize_t);
1933 1933 bz_stream *ps = &hp->bzstream;
1934 1934 int rc = 0;
1935 1935 uint32_t csize;
1936 1936 dumpcsize_t cs;
1937 1937
1938 1938 /* Set input pointers to new input page */
1939 1939 if (size > 0) {
1940 1940 ps->avail_in = size;
1941 1941 ps->next_in = buf;
1942 1942 }
1943 1943
1944 1944 /* CONSTCOND */
1945 1945 while (1) {
1946 1946
1947 1947 /* Quit when all input has been consumed */
1948 1948 if (ps->avail_in == 0 && mode == BZ_RUN)
1949 1949 break;
1950 1950
1951 1951 /* Get a new output buffer */
1952 1952 if (hp->cpout == NULL) {
1953 1953 HRSTART(hp->perpage, outwait);
1954 1954 hp->cpout = CQ_GET(freebufq);
1955 1955 HRSTOP(hp->perpage, outwait);
1956 1956 ps->avail_out = hp->cpout->size - CSIZE;
1957 1957 ps->next_out = hp->cpout->buf + CSIZE;
1958 1958 }
1959 1959
1960 1960 /* Compress input, or finalize */
1961 1961 HRSTART(hp->perpage, compress);
1962 1962 rc = BZ2_bzCompress(ps, mode);
1963 1963 HRSTOP(hp->perpage, compress);
1964 1964
1965 1965 /* Check for error */
1966 1966 if (mode == BZ_RUN && rc != BZ_RUN_OK) {
1967 1967 dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n",
1968 1968 hp->helper, BZ2_bzErrorString(rc),
1969 1969 hp->cpin->pagenum);
1970 1970 break;
1971 1971 }
1972 1972
1973 1973 /* Write the buffer if it is full, or we are flushing */
1974 1974 if (ps->avail_out == 0 || mode == BZ_FINISH) {
1975 1975 csize = hp->cpout->size - CSIZE - ps->avail_out;
1976 1976 cs = DUMP_SET_TAG(csize, hp->tag);
1977 1977 if (csize > 0) {
1978 1978 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
1979 1979 dumpsys_swrite(hp, hp->cpout, csize + CSIZE);
1980 1980 hp->cpout = NULL;
1981 1981 }
1982 1982 }
1983 1983
1984 1984 /* Check for final complete */
1985 1985 if (mode == BZ_FINISH) {
1986 1986 if (rc == BZ_STREAM_END)
1987 1987 break;
1988 1988 if (rc != BZ_FINISH_OK) {
1989 1989 dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n",
1990 1990 hp->helper, BZ2_bzErrorString(rc));
1991 1991 break;
1992 1992 }
1993 1993 }
1994 1994 }
1995 1995
1996 1996 /* Cleanup state and buffers */
1997 1997 if (mode == BZ_FINISH) {
1998 1998
1999 1999 /* Reset state so that it is re-usable. */
2000 2000 (void) BZ2_bzCompressReset(&hp->bzstream);
2001 2001
2002 2002 /* Give any unused output buffer to the main task */
2003 2003 if (hp->cpout != NULL) {
2004 2004 hp->cpout->used = 0;
2005 2005 CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG);
2006 2006 hp->cpout = NULL;
2007 2007 }
2008 2008 }
2009 2009 }
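
/*
 * Note on the block layout produced above: each block handed to
 * dumpsys_swrite() begins with a dumpcsize_t word, DUMP_SET_TAG(csize,
 * hp->tag), followed by csize bytes of bzip2 output.  The per-range
 * dumpstreamhdr_t is fed through the compressor (see
 * dumpsys_bz2compress below) rather than stored raw.
 */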
2010 2010
2011 2011 static void
2012 2012 dumpsys_bz2compress(helper_t *hp)
2013 2013 {
2014 2014 dumpsync_t *ds = hp->ds;
2015 2015 dumpstreamhdr_t sh;
2016 2016
2017 2017 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2018 2018 sh.stream_pagenum = (pgcnt_t)-1;
2019 2019 sh.stream_npages = 0;
2020 2020 hp->cpin = NULL;
2021 2021 hp->cpout = NULL;
2022 2022 hp->cperr = NULL;
2023 2023 hp->in = 0;
2024 2024 hp->out = 0;
2025 2025 hp->bzstream.avail_in = 0;
2026 2026
2027 2027 /* Bump reference to mainq while we are running */
2028 2028 CQ_OPEN(mainq);
2029 2029
2030 2030 /* Get one page at a time */
2031 2031 while (dumpsys_sread(hp)) {
2032 2032 if (sh.stream_pagenum != hp->cpin->pagenum) {
2033 2033 sh.stream_pagenum = hp->cpin->pagenum;
2034 2034 sh.stream_npages = btop(hp->cpin->used);
2035 2035 dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN);
2036 2036 }
2037 2037 dumpsys_bzrun(hp, hp->page, PAGESIZE, BZ_RUN);
2038 2038 }
2039 2039
2040 2040 /* Done with input, flush any partial buffer */
2041 2041 if (sh.stream_pagenum != (pgcnt_t)-1) {
2042 2042 dumpsys_bzrun(hp, NULL, 0, BZ_FINISH);
2043 2043 dumpsys_errmsg(hp, NULL);
2044 2044 }
2045 2045
2046 2046 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2047 2047
2048 2048 /* Decrement main queue count, we are done */
2049 2049 CQ_CLOSE(mainq);
2050 2050 }
2051 2051
2052 2052 /*
2053 2053 * Compress with lzjb
2054 2054 * write stream block if full or size==0
2055 2055 * if csize==0 write stream header, else write <csize, data>
2056 2056 * size==0 is a call to flush a buffer
2057 2057 * hp->cpout is the buffer we are flushing or filling
2058 2058 * hp->out is the next index to fill data
2059 2059 * osize is either csize+data, or the size of a stream header
2060 2060 */
2061 2061 static void
2062 2062 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size)
2063 2063 {
2064 2064 dumpsync_t *ds = hp->ds;
2065 2065 const int CSIZE = sizeof (dumpcsize_t);
2066 2066 dumpcsize_t cs;
2067 2067 size_t osize = csize > 0 ? CSIZE + size : size;
2068 2068
2069 2069 /* If flush, and there is no buffer, just return */
2070 2070 if (size == 0 && hp->cpout == NULL)
2071 2071 return;
2072 2072
2073 2073 /* If flush, or cpout is full, write it out */
2074 2074 if (size == 0 ||
2075 2075 hp->cpout != NULL && hp->out + osize > hp->cpout->size) {
2076 2076
2077 2077 /* Set tag+size word at the front of the stream block. */
2078 2078 cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag);
2079 2079 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2080 2080
2081 2081 /* Write block to dump file. */
2082 2082 dumpsys_swrite(hp, hp->cpout, hp->out);
2083 2083
2084 2084 /* Clear pointer to indicate we need a new buffer */
2085 2085 hp->cpout = NULL;
2086 2086
2087 2087 /* flushing, we are done */
2088 2088 if (size == 0)
2089 2089 return;
2090 2090 }
2091 2091
2092 2092 /* Get an output buffer if we don't have one. */
2093 2093 if (hp->cpout == NULL) {
2094 2094 HRSTART(hp->perpage, outwait);
2095 2095 hp->cpout = CQ_GET(freebufq);
2096 2096 HRSTOP(hp->perpage, outwait);
2097 2097 hp->out = CSIZE;
2098 2098 }
2099 2099
2100 2100 /* Store csize word. This is the size of compressed data. */
2101 2101 if (csize > 0) {
2102 2102 cs = DUMP_SET_TAG(csize, 0);
2103 2103 (void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE);
2104 2104 hp->out += CSIZE;
2105 2105 }
2106 2106
2107 2107 /* Store the data. */
2108 2108 (void) memcpy(hp->cpout->buf + hp->out, buf, size);
2109 2109 hp->out += size;
2110 2110 }
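
/*
 * Note on the block layout produced above: each block handed to
 * dumpsys_swrite() begins with a dumpcsize_t word, DUMP_SET_TAG(block
 * bytes - CSIZE, hp->tag), followed by a sequence of records.  A record
 * is either a raw dumpstreamhdr_t (written with csize == 0) or a
 * dumpcsize_t word DUMP_SET_TAG(csize, 0) followed by csize bytes of
 * lzjb-compressed page data.
 */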
2111 2111
2112 2112 static void
2113 2113 dumpsys_lzjbcompress(helper_t *hp)
2114 2114 {
2115 2115 dumpsync_t *ds = hp->ds;
2116 2116 size_t csize;
2117 2117 dumpstreamhdr_t sh;
2118 2118
2119 2119 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2120 2120 sh.stream_pagenum = (pfn_t)-1;
2121 2121 sh.stream_npages = 0;
2122 2122 hp->cpin = NULL;
2123 2123 hp->cpout = NULL;
2124 2124 hp->cperr = NULL;
2125 2125 hp->in = 0;
2126 2126 hp->out = 0;
2127 2127
2128 2128 /* Bump reference to mainq while we are running */
2129 2129 CQ_OPEN(mainq);
2130 2130
2131 2131 /* Get one page at a time */
2132 2132 while (dumpsys_sread(hp)) {
2133 2133
2134 2134 /* Create a stream header for each new input map */
2135 2135 if (sh.stream_pagenum != hp->cpin->pagenum) {
2136 2136 sh.stream_pagenum = hp->cpin->pagenum;
2137 2137 sh.stream_npages = btop(hp->cpin->used);
2138 2138 dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh));
2139 2139 }
2140 2140
2141 2141 /* Compress one page */
2142 2142 HRSTART(hp->perpage, compress);
2143 2143 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2144 2144 HRSTOP(hp->perpage, compress);
2145 2145
2146 2146 /* Add csize+data to output block */
2147 2147 ASSERT(csize > 0 && csize <= PAGESIZE);
2148 2148 dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize);
2149 2149 }
2150 2150
2151 2151 /* Done with input, flush any partial buffer */
2152 2152 if (sh.stream_pagenum != (pfn_t)-1) {
2153 2153 dumpsys_lzjbrun(hp, 0, NULL, 0);
2154 2154 dumpsys_errmsg(hp, NULL);
2155 2155 }
2156 2156
2157 2157 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2158 2158
2159 2159 /* Decrement main queue count, we are done */
2160 2160 CQ_CLOSE(mainq);
2161 2161 }
2162 2162
2163 2163 /*
2164 2164 * Dump helper called from panic_idle() to compress pages. CPUs in
2165 2165 * this path must not call most kernel services.
2166 2166 *
2167 2167 * During panic, all but one of the CPUs is idle. These CPUs are used
2168 2168 * as helpers working in parallel to copy and compress memory
2169 2169 * pages. During a panic, however, these processors cannot call any
2170 2170 * kernel services. This is because mutexes become no-ops during
2171 2171 * panic, and cross-call interrupts are inhibited. Therefore, during
2172 2172 * panic dump the helper CPUs communicate with the panic CPU using
2173 2173 * memory variables. All memory mapping and I/O is performed by the
2174 2174 * panic CPU.
2175 2175 *
2176 2176 * At dump configuration time, helper_lock is set and helpers_wanted
2177 2177 * is 0. dumpsys() decides whether to set helpers_wanted before
2178 2178 * clearing helper_lock.
2179 2179 *
2180 2180 * At panic time, idle CPUs spin-wait on helper_lock, then take the lock
2181 2181 * in turn and either become a helper or return.
2182 2182 */
2183 2183 void
2184 2184 dumpsys_helper()
2185 2185 {
2186 2186 dumpsys_spinlock(&dumpcfg.helper_lock);
2187 2187 if (dumpcfg.helpers_wanted) {
2188 2188 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2189 2189
2190 2190 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2191 2191 if (hp->helper == FREEHELPER) {
2192 2192 hp->helper = CPU->cpu_id;
2193 2193 BT_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2194 2194
2195 2195 dumpsys_spinunlock(&dumpcfg.helper_lock);
2196 2196
2197 2197 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2198 2198 dumpsys_lzjbcompress(hp);
2199 2199 else
2200 2200 dumpsys_bz2compress(hp);
2201 2201
2202 2202 hp->helper = DONEHELPER;
2203 2203 return;
2204 2204 }
2205 2205 }
2206 2206
2207 2207 /* No more helpers are needed. */
2208 2208 dumpcfg.helpers_wanted = 0;
2209 2209
2210 2210 }
2211 2211 dumpsys_spinunlock(&dumpcfg.helper_lock);
2212 2212 }
2213 2213
2214 2214 /*
2215 2215 * No-wait helper callable in spin loops.
2216 2216 *
2217 2217 * Do not wait for helper_lock. Just check helpers_wanted. The caller
2218 2218 * may decide to continue. This is the "c)ontinue, s)ync, r)eset? s"
2219 2219 * case.
2220 2220 */
2221 2221 void
2222 2222 dumpsys_helper_nw()
2223 2223 {
2224 2224 if (dumpcfg.helpers_wanted)
2225 2225 dumpsys_helper();
2226 2226 }
2227 2227
2228 2228 /*
2229 2229 * Dump helper for live dumps.
2230 2230 * These run as a system task.
2231 2231 */
2232 2232 static void
2233 2233 dumpsys_live_helper(void *arg)
2234 2234 {
2235 2235 helper_t *hp = arg;
2236 2236
2237 2237 BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2238 2238 if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2239 2239 dumpsys_lzjbcompress(hp);
2240 2240 else
2241 2241 dumpsys_bz2compress(hp);
2242 2242 }
2243 2243
2244 2244 /*
2245 2245 * Compress one page with lzjb (single threaded case)
2246 2246 */
2247 2247 static void
2248 2248 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp)
2249 2249 {
2250 2250 dumpsync_t *ds = hp->ds;
2251 2251 uint32_t csize;
2252 2252
2253 2253 hp->helper = MAINHELPER;
2254 2254 hp->in = 0;
2255 2255 hp->used = 0;
2256 2256 hp->cpin = cp;
2257 2257 while (hp->used < cp->used) {
2258 2258 HRSTART(hp->perpage, copy);
2259 2259 hp->in = dumpsys_copy_page(hp, hp->in);
2260 2260 hp->used += PAGESIZE;
2261 2261 HRSTOP(hp->perpage, copy);
2262 2262
2263 2263 HRSTART(hp->perpage, compress);
2264 2264 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2265 2265 HRSTOP(hp->perpage, compress);
2266 2266
2267 2267 HRSTART(hp->perpage, write);
2268 2268 dumpvp_write(&csize, sizeof (csize));
2269 2269 dumpvp_write(hp->lzbuf, csize);
2270 2270 HRSTOP(hp->perpage, write);
2271 2271 }
2272 2272 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
2273 2273 hp->cpin = NULL;
2274 2274 }
2275 2275
2276 2276 /*
2277 2277 * Main task to dump pages. This is called on the dump CPU.
2278 2278 */
2279 2279 static void
2280 2280 dumpsys_main_task(void *arg)
2281 2281 {
2282 2282 dumpsync_t *ds = arg;
2283 2283 pgcnt_t pagenum = 0, bitnum = 0, hibitnum;
2284 2284 dumpmlw_t mlw;
2285 2285 cbuf_t *cp;
2286 2286 pgcnt_t baseoff, pfnoff;
2287 2287 pfn_t base, pfn;
2288 2288 int sec, i, dumpserial;
2289 2289
2290 2290 /*
2291 2291 * Fall back to serial mode if there are no helpers.
2292 2292 * dump_plat_mincpu can be set to 0 at any time.
2293 2293 * dumpcfg.helpermap must contain at least one member.
2294 2294 */
2295 2295 dumpserial = 1;
2296 2296
2297 2297 if (dump_plat_mincpu != 0 && dumpcfg.clevel != 0) {
2298 2298 for (i = 0; i < BT_BITOUL(NCPU); ++i) {
2299 2299 if (dumpcfg.helpermap[i] != 0) {
2300 2300 dumpserial = 0;
2301 2301 break;
2302 2302 }
2303 2303 }
2304 2304 }
2305 2305
2306 2306 if (dumpserial) {
2307 2307 dumpcfg.clevel = 0;
2308 2308 if (dumpcfg.helper[0].lzbuf == NULL)
2309 2309 dumpcfg.helper[0].lzbuf = dumpcfg.helper[1].page;
2310 2310 }
2311 2311
2312 2312 dump_init_memlist_walker(&mlw);
2313 2313
2314 2314 /* CONSTCOND */
2315 2315 while (1) {
2316 2316
2317 2317 if (ds->percent > ds->percent_done) {
2318 2318 ds->percent_done = ds->percent;
2319 2319 sec = (gethrtime() - ds->start) / 1000 / 1000 / 1000;
2320 2320 uprintf("^\r%2d:%02d %3d%% done",
2321 2321 sec / 60, sec % 60, ds->percent);
2322 2322 ds->neednl = 1;
2323 2323 }
2324 2324
2325 2325 while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) {
2326 2326
2327 2327 /* the writerq never blocks */
2328 2328 cp = CQ_GET(writerq);
2329 2329 if (cp == NULL)
2330 2330 break;
2331 2331
2332 2332 dump_timeleft = dump_timeout;
2333 2333
2334 2334 HRSTART(ds->perpage, write);
2335 2335 dumpvp_write(cp->buf, cp->used);
2336 2336 HRSTOP(ds->perpage, write);
2337 2337
2338 2338 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2339 2339 }
2340 2340
2341 2341 /*
2342 2342 * Wait here for some buffers to process. Returns NULL
2343 2343 * when all helpers have terminated and all buffers
2344 2344 * have been processed.
2345 2345 */
2346 2346 cp = CQ_GET(mainq);
2347 2347
2348 2348 if (cp == NULL) {
2349 2349
2350 2350 /* Drain the write queue. */
2351 2351 if (!CQ_IS_EMPTY(writerq))
2352 2352 continue;
2353 2353
2354 2354 /* Main task exits here. */
2355 2355 break;
2356 2356 }
2357 2357
2358 2358 dump_timeleft = dump_timeout;
2359 2359
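		/*
		 * Buffer states handled below: CBUF_FREEMAP buffers are
		 * filled with the next pfn range, mapped, and either
		 * compressed serially or passed to a helper as CBUF_INREADY;
		 * CBUF_USEDMAP buffers come back from helpers to be unmapped
		 * and recycled; CBUF_WRITE and CBUF_ERRMSG buffers carry
		 * compressed output and error text, respectively.
		 */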
2360 2360 switch (cp->state) {
2361 2361
2362 2362 case CBUF_FREEMAP:
2363 2363
2364 2364 /*
2365 2365 * Note that we drop CBUF_FREEMAP buffers on
2366 2366 * the floor (they will not be on any cqueue)
2367 2367 * when we no longer need them.
2368 2368 */
2369 2369 if (bitnum >= dumpcfg.bitmapsize)
2370 2370 break;
2371 2371
2372 2372 if (dump_ioerr) {
2373 2373 bitnum = dumpcfg.bitmapsize;
2374 2374 CQ_CLOSE(helperq);
2375 2375 break;
2376 2376 }
2377 2377
2378 2378 HRSTART(ds->perpage, bitmap);
2379 2379 for (; bitnum < dumpcfg.bitmapsize; bitnum++)
2380 2380 if (BT_TEST(dumpcfg.bitmap, bitnum))
2381 2381 break;
2382 2382 HRSTOP(ds->perpage, bitmap);
2383 2383 dump_timeleft = dump_timeout;
2384 2384
2385 2385 if (bitnum >= dumpcfg.bitmapsize) {
2386 2386 CQ_CLOSE(helperq);
2387 2387 break;
2388 2388 }
2389 2389
2390 2390 /*
2391 2391 * Try to map CBUF_MAPSIZE ranges. Can't
2392 2392 * assume that memory segment size is a
2393 2393 * multiple of CBUF_MAPSIZE. Can't assume that
2394 2394 * the segment starts on a CBUF_MAPSIZE
2395 2395 * boundary.
2396 2396 */
2397 2397 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2398 2398 ASSERT(pfn != PFN_INVALID);
2399 2399 ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize);
2400 2400
2401 2401 base = P2ALIGN(pfn, CBUF_MAPNP);
2402 2402 if (base < mlw.mpaddr) {
2403 2403 base = mlw.mpaddr;
2404 2404 baseoff = P2PHASE(base, CBUF_MAPNP);
2405 2405 } else {
2406 2406 baseoff = 0;
2407 2407 }
2408 2408
2409 2409 pfnoff = pfn - base;
2410 2410 if (pfnoff + mlw.mpleft < CBUF_MAPNP) {
2411 2411 hibitnum = bitnum + mlw.mpleft;
2412 2412 cp->size = ptob(pfnoff + mlw.mpleft);
2413 2413 } else {
2414 2414 hibitnum = bitnum - pfnoff + CBUF_MAPNP -
2415 2415 baseoff;
2416 2416 cp->size = CBUF_MAPSIZE - ptob(baseoff);
2417 2417 }
2418 2418
2419 2419 cp->pfn = pfn;
2420 2420 cp->bitnum = bitnum++;
2421 2421 cp->pagenum = pagenum++;
2422 2422 cp->off = ptob(pfnoff);
2423 2423
2424 2424 for (; bitnum < hibitnum; bitnum++)
2425 2425 if (BT_TEST(dumpcfg.bitmap, bitnum))
2426 2426 pagenum++;
2427 2427
2428 2428 dump_timeleft = dump_timeout;
2429 2429 cp->used = ptob(pagenum - cp->pagenum);
2430 2430
2431 2431 HRSTART(ds->perpage, map);
2432 2432 hat_devload(kas.a_hat, cp->buf, cp->size, base,
2433 2433 PROT_READ, HAT_LOAD_NOCONSIST);
2434 2434 HRSTOP(ds->perpage, map);
2435 2435
2436 2436 ds->pages_mapped += btop(cp->size);
2437 2437 ds->pages_used += pagenum - cp->pagenum;
2438 2438
2439 2439 CQ_OPEN(mainq);
2440 2440
2441 2441 /*
2442 2442 * If there are no helpers the main task does
2443 2443 * non-streams lzjb compress.
2444 2444 */
2445 2445 if (dumpserial) {
2446 2446 dumpsys_lzjb_page(dumpcfg.helper, cp);
2447 2447 break;
2448 2448 }
2449 2449
2450 2450 /* pass mapped pages to a helper */
2451 2451 CQ_PUT(helperq, cp, CBUF_INREADY);
2452 2452
2453 2453 /* the last page was done */
2454 2454 if (bitnum >= dumpcfg.bitmapsize)
2455 2455 CQ_CLOSE(helperq);
2456 2456
2457 2457 break;
2458 2458
2459 2459 case CBUF_USEDMAP:
2460 2460
2461 2461 ds->npages += btop(cp->used);
2462 2462
2463 2463 HRSTART(ds->perpage, unmap);
2464 2464 hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD);
2465 2465 HRSTOP(ds->perpage, unmap);
2466 2466
2467 2467 if (bitnum < dumpcfg.bitmapsize)
2468 2468 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2469 2469 CQ_CLOSE(mainq);
2470 2470
2471 2471 ASSERT(ds->npages <= dumphdr->dump_npages);
2472 2472 ds->percent = ds->npages * 100LL / dumphdr->dump_npages;
2473 2473 break;
2474 2474
2475 2475 case CBUF_WRITE:
2476 2476
2477 2477 CQ_PUT(writerq, cp, CBUF_WRITE);
2478 2478 break;
2479 2479
2480 2480 case CBUF_ERRMSG:
2481 2481
2482 2482 if (cp->used > 0) {
2483 2483 cp->buf[cp->size - 2] = '\n';
2484 2484 cp->buf[cp->size - 1] = '\0';
2485 2485 if (ds->neednl) {
2486 2486 uprintf("\n%s", cp->buf);
2487 2487 ds->neednl = 0;
2488 2488 } else {
2489 2489 uprintf("%s", cp->buf);
2490 2490 }
2491 2491 /* wait for console output */
2492 2492 drv_usecwait(200000);
2493 2493 dump_timeleft = dump_timeout;
2494 2494 }
2495 2495 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2496 2496 break;
2497 2497
2498 2498 default:
2499 2499 uprintf("dump: unexpected buffer state %d, "
2500 2500 "buffer will be lost\n", cp->state);
2501 2501 break;
2502 2502
2503 2503 } /* end switch */
2504 2504
2505 2505 } /* end while(1) */
2506 2506 }
2507 2507
2508 2508 #ifdef COLLECT_METRICS
2509 2509 size_t
2510 2510 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size)
2511 2511 {
2512 2512 dumpcfg_t *cfg = &dumpcfg;
2513 2513 int myid = CPU->cpu_seqid;
2514 2514 int i, compress_ratio;
2515 2515 int sec, iorate;
2516 2516 helper_t *hp, *hpend = &cfg->helper[cfg->nhelper];
2517 2517 char *e = buf + size;
2518 2518 char *p = buf;
2519 2519
2520 2520 sec = ds->elapsed / (1000 * 1000 * 1000ULL);
2521 2521 if (sec < 1)
2522 2522 sec = 1;
2523 2523
2524 2524 if (ds->iotime < 1)
2525 2525 ds->iotime = 1;
2526 2526 iorate = (ds->nwrite * 100000ULL) / ds->iotime;
2527 2527
2528 2528 compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1);
2529 2529
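/*
 * Append formatted text at p, bounded by e; once the buffer is full,
 * subsequent P() invocations are no-ops.
 */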
2530 2530 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
2531 2531
2532 2532 P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
2533 2533 P("Master cpu_id,%d\n", CPU->cpu_id);
2534 2534 P("dump_flags,0x%x\n", dumphdr->dump_flags);
2535 2535 P("dump_ioerr,%d\n", dump_ioerr);
2536 2536
2537 2537 P("Helpers:\n");
2538 2538 for (i = 0; i < ncpus; i++) {
2539 2539 if ((i & 15) == 0)
2540 2540 P(",,%03d,", i);
2541 2541 if (i == myid)
2542 2542 P(" M");
2543 2543 else if (BT_TEST(cfg->helpermap, i))
2544 2544 P("%4d", cpu_seq[i]->cpu_id);
2545 2545 else
2546 2546 P(" *");
2547 2547 if ((i & 15) == 15)
2548 2548 P("\n");
2549 2549 }
2550 2550
2551 2551 P("ncbuf_used,%d\n", cfg->ncbuf_used);
2552 2552 P("ncmap,%d\n", cfg->ncmap);
2553 2553
2554 2554 P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m);
2555 2555 P("Found small pages,%ld\n", cfg->foundsm);
2556 2556
2557 2557 P("Compression level,%d\n", cfg->clevel);
2558 2558 P("Compression type,%s %s\n", cfg->clevel == 0 ? "serial" : "parallel",
2559 2559 cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb");
2560 2560 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
2561 2561 100);
2562 2562 P("nhelper_used,%d\n", cfg->nhelper_used);
2563 2563
2564 2564 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
2565 2565 P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite);
2566 2566 P("..total nsec,%lld\n", (u_longlong_t)ds->iotime);
2567 2567 P("dumpbuf.iosize,%ld\n", dumpbuf.iosize);
2568 2568 P("dumpbuf.size,%ld\n", dumpbuf.size);
2569 2569
2570 2570 P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec);
2571 2571 P("Dump pages,%llu\n", (u_longlong_t)ds->npages);
2572 2572 P("Dump time,%d\n", sec);
2573 2573
2574 2574 if (ds->pages_mapped > 0)
2575 2575 P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used)
2576 2576 / ds->pages_mapped));
2577 2577
2578 2578 P("\nPer-page metrics:\n");
2579 2579 if (ds->npages > 0) {
2580 2580 for (hp = cfg->helper; hp != hpend; hp++) {
2581 2581 #define PERPAGE(x) ds->perpage.x += hp->perpage.x;
2582 2582 PERPAGES;
2583 2583 #undef PERPAGE
2584 2584 }
2585 2585 #define PERPAGE(x) \
2586 2586 P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages));
2587 2587 PERPAGES;
2588 2588 #undef PERPAGE
2589 2589 P("freebufq.empty,%d\n", (int)(ds->freebufq.empty /
2590 2590 ds->npages));
2591 2591 P("helperq.empty,%d\n", (int)(ds->helperq.empty /
2592 2592 ds->npages));
2593 2593 P("writerq.empty,%d\n", (int)(ds->writerq.empty /
2594 2594 ds->npages));
2595 2595 P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages));
2596 2596
2597 2597 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait /
2598 2598 ds->npages));
2599 2599 }
2600 2600 #undef P
2601 2601 if (p < e)
2602 2602 bzero(p, e - p);
2603 2603 return (p - buf);
2604 2604 }
2605 2605 #endif /* COLLECT_METRICS */
2606 2606
2607 2607 /*
2608 2608 * Dump the system.
2609 2609 */
2610 2610 void
2611 2611 dumpsys(void)
2612 2612 {
2613 2613 dumpsync_t *ds = &dumpsync;
2614 2614 taskq_t *livetaskq = NULL;
2615 2615 pfn_t pfn;
2616 2616 pgcnt_t bitnum;
2617 2617 proc_t *p;
2618 2618 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2619 2619 cbuf_t *cp;
2620 2620 pid_t npids, pidx;
2621 2621 char *content;
2622 2622 char *buf;
2623 2623 size_t size;
2624 2624 int save_dump_clevel;
2625 2625 dumpmlw_t mlw;
2626 2626 dumpcsize_t datatag;
2627 2627 dumpdatahdr_t datahdr;
2628 2628
2629 2629 if (dumpvp == NULL || dumphdr == NULL) {
2630 2630 uprintf("skipping system dump - no dump device configured\n");
2631 2631 if (panicstr) {
2632 2632 dumpcfg.helpers_wanted = 0;
2633 2633 dumpsys_spinunlock(&dumpcfg.helper_lock);
2634 2634 }
2635 2635 return;
2636 2636 }
2637 2637 dumpbuf.cur = dumpbuf.start;
2638 2638
2639 2639 /* clear the sync variables */
2640 2640 ASSERT(dumpcfg.nhelper > 0);
2641 2641 bzero(ds, sizeof (*ds));
2642 2642 ds->dumpcpu = CPU->cpu_id;
2643 2643
2644 2644 /*
2645 2645 * Calculate the starting block for dump. If we're dumping on a
2646 2646 * swap device, start 1/5 of the way in; otherwise, start at the
2647 2647 * beginning. And never use the first page -- it may be a disk label.
2648 2648 */
2649 2649 if (dumpvp->v_flag & VISSWAP)
2650 2650 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
2651 2651 else
2652 2652 dumphdr->dump_start = DUMP_OFFSET;
2653 2653
2654 2654 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
2655 2655 dumphdr->dump_crashtime = gethrestime_sec();
2656 2656 dumphdr->dump_npages = 0;
2657 2657 dumphdr->dump_nvtop = 0;
2658 2658 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
2659 2659 dump_timeleft = dump_timeout;
2660 2660
2661 2661 if (panicstr) {
2662 2662 dumphdr->dump_flags &= ~DF_LIVE;
2663 2663 (void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL);
2664 2664 (void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL);
2665 2665 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
2666 2666 panicstr, panicargs);
2667 2667
2668 2668 }
2669 2669
2670 2670 if (dump_conflags & DUMP_ALL)
2671 2671 content = "all";
2672 2672 else if (dump_conflags & DUMP_CURPROC)
2673 2673 content = "kernel + curproc";
2674 2674 else
2675 2675 content = "kernel";
2676 2676 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
2677 2677 dumphdr->dump_start, content);
2678 2678
2679 2679 /* Make sure nodename is current */
2680 2680 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
2681 2681
2682 2682 /*
2683 2683 * If this is a live dump, try to open a VCHR vnode for better
2684 2684 * performance. We must take care to flush the buffer cache
2685 2685 * first.
2686 2686 */
2687 2687 if (!panicstr) {
2688 2688 vnode_t *cdev_vp, *cmn_cdev_vp;
2689 2689
2690 2690 ASSERT(dumpbuf.cdev_vp == NULL);
2691 2691 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
2692 2692 if (cdev_vp != NULL) {
2693 2693 cmn_cdev_vp = common_specvp(cdev_vp);
2694 2694 if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
2695 2695 == 0) {
2696 2696 if (vn_has_cached_data(dumpvp))
2697 2697 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
2698 2698 B_INVAL | B_TRUNC, kcred);
2699 2699 dumpbuf.cdev_vp = cmn_cdev_vp;
2700 2700 } else {
2701 2701 VN_RELE(cdev_vp);
2702 2702 }
2703 2703 }
2704 2704 }
2705 2705
2706 2706 /*
2707 2707 * Store a hires timestamp so we can look it up during debugging.
2708 2708 */
2709 2709 lbolt_debug_entry();
2710 2710
2711 2711 /*
2712 2712 * Leave room for the message and ereport save areas and terminal dump
2713 2713 * header.
2714 2714 */
2715 2715 dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
2716 2716 DUMP_ERPTSIZE;
2717 2717
2718 2718 /*
2719 2719 * Write out the symbol table. It's no longer compressed,
2720 2720 * so its 'size' and 'csize' are equal.
2721 2721 */
2722 2722 dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
2723 2723 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
2724 2724 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
2725 2725
2726 2726 /*
2727 2727 * Write out the translation map.
2728 2728 */
2729 2729 dumphdr->dump_map = dumpvp_flush();
2730 2730 dump_as(&kas);
2731 2731 dumphdr->dump_nvtop += dump_plat_addr();
2732 2732
2733 2733 /*
2734 2734 * call into hat, which may have unmapped pages that also need to
2735 2735 * be in the dump
2736 2736 */
2737 2737 hat_dump();
2738 2738
2739 2739 if (dump_conflags & DUMP_ALL) {
2740 2740 mutex_enter(&pidlock);
2741 2741
2742 2742 for (npids = 0, p = practive; p != NULL; p = p->p_next)
2743 2743 dumpcfg.pids[npids++] = p->p_pid;
2744 2744
2745 2745 mutex_exit(&pidlock);
2746 2746
2747 2747 for (pidx = 0; pidx < npids; pidx++)
2748 2748 (void) dump_process(dumpcfg.pids[pidx]);
2749 2749
2750 2750 dump_init_memlist_walker(&mlw);
2751 2751 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2752 2752 dump_timeleft = dump_timeout;
2753 2753 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2754 2754 /*
2755 2755 * Some hypervisors do not have all pages available to
2756 2756 * be accessed by the guest OS. Check for page
2757 2757 * accessibility.
2758 2758 */
2759 2759 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
2760 2760 PLAT_HOLD_OK)
2761 2761 continue;
2762 2762 BT_SET(dumpcfg.bitmap, bitnum);
2763 2763 }
2764 2764 dumphdr->dump_npages = dumpcfg.bitmapsize;
2765 2765 dumphdr->dump_flags |= DF_ALL;
2766 2766
2767 2767 } else if (dump_conflags & DUMP_CURPROC) {
2768 2768 /*
2769 2769 * Determine which pid is to be dumped. If we're panicking, we
2770 2770 * dump the process associated with panic_thread (if any). If
2771 2771 * this is a live dump, we dump the process associated with
2772 2772 * curthread.
2773 2773 */
2774 2774 npids = 0;
2775 2775 if (panicstr) {
2776 2776 if (panic_thread != NULL &&
2777 2777 panic_thread->t_procp != NULL &&
2778 2778 panic_thread->t_procp != &p0) {
2779 2779 dumpcfg.pids[npids++] =
2780 2780 panic_thread->t_procp->p_pid;
2781 2781 }
2782 2782 } else {
2783 2783 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
2784 2784 }
2785 2785
2786 2786 if (npids && dump_process(dumpcfg.pids[0]) == 0)
2787 2787 dumphdr->dump_flags |= DF_CURPROC;
2788 2788 else
2789 2789 dumphdr->dump_flags |= DF_KERNEL;
2790 2790
2791 2791 } else {
2792 2792 dumphdr->dump_flags |= DF_KERNEL;
2793 2793 }
2794 2794
2795 2795 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
2796 2796
2797 2797 /*
2798 2798 * Write out the pfn table.
2799 2799 */
2800 2800 dumphdr->dump_pfn = dumpvp_flush();
2801 2801 dump_init_memlist_walker(&mlw);
2802 2802 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2803 2803 dump_timeleft = dump_timeout;
2804 2804 if (!BT_TEST(dumpcfg.bitmap, bitnum))
2805 2805 continue;
2806 2806 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2807 2807 ASSERT(pfn != PFN_INVALID);
2808 2808 dumpvp_write(&pfn, sizeof (pfn_t));
2809 2809 }
2810 2810 dump_plat_pfn();
2811 2811
2812 2812 /*
2813 2813 * Write out all the pages.
2814 2814 * Map pages, copy them handling UEs, compress, and write them out.
2815 2815 * Cooperate with any helpers running on CPUs in panic_idle().
2816 2816 */
2817 2817 dumphdr->dump_data = dumpvp_flush();
2818 2818
2819 2819 bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU));
2820 2820 ds->live = dumpcfg.clevel > 0 &&
2821 2821 (dumphdr->dump_flags & DF_LIVE) != 0;
2822 2822
2823 2823 save_dump_clevel = dumpcfg.clevel;
2824 2824 if (panicstr)
2825 2825 dumpsys_get_maxmem();
2826 2826 else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2827 2827 dumpcfg.clevel = DUMP_CLEVEL_LZJB;
2828 2828
2829 2829 dumpcfg.nhelper_used = 0;
2830 2830 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2831 2831 if (hp->page == NULL) {
2832 2832 hp->helper = DONEHELPER;
2833 2833 continue;
2834 2834 }
2835 2835 ++dumpcfg.nhelper_used;
2836 2836 hp->helper = FREEHELPER;
2837 2837 hp->taskqid = NULL;
2838 2838 hp->ds = ds;
2839 2839 bzero(&hp->perpage, sizeof (hp->perpage));
2840 2840 if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2841 2841 (void) BZ2_bzCompressReset(&hp->bzstream);
2842 2842 }
2843 2843
2844 2844 CQ_OPEN(freebufq);
2845 2845 CQ_OPEN(helperq);
2846 2846
2847 2847 dumpcfg.ncbuf_used = 0;
2848 2848 for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) {
2849 2849 if (cp->buf != NULL) {
2850 2850 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2851 2851 ++dumpcfg.ncbuf_used;
2852 2852 }
2853 2853 }
2854 2854
2855 2855 for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++)
2856 2856 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2857 2857
2858 2858 ds->start = gethrtime();
2859 2859 ds->iowaitts = ds->start;
2860 2860
2861 2861 /* start helpers */
2862 2862 if (ds->live) {
2863 2863 int n = dumpcfg.nhelper_used;
2864 2864 int pri = MINCLSYSPRI - 25;
2865 2865
2866 2866 livetaskq = taskq_create("LiveDump", n, pri, n, n,
2867 2867 TASKQ_PREPOPULATE);
2868 2868 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2869 2869 if (hp->page == NULL)
2870 2870 continue;
2871 2871 hp->helper = hp - dumpcfg.helper;
2872 2872 hp->taskqid = taskq_dispatch(livetaskq,
2873 2873 dumpsys_live_helper, (void *)hp, TQ_NOSLEEP);
2874 2874 }
2875 2875
2876 2876 } else {
2877 2877 if (panicstr)
2878 2878 kmem_dump_begin();
2879 2879 dumpcfg.helpers_wanted = dumpcfg.clevel > 0;
2880 2880 dumpsys_spinunlock(&dumpcfg.helper_lock);
2881 2881 }
2882 2882
2883 2883 /* run main task */
2884 2884 dumpsys_main_task(ds);
2885 2885
2886 2886 ds->elapsed = gethrtime() - ds->start;
2887 2887 if (ds->elapsed < 1)
2888 2888 ds->elapsed = 1;
2889 2889
2890 2890 if (livetaskq != NULL)
2891 2891 taskq_destroy(livetaskq);
2892 2892
2893 2893 if (ds->neednl) {
2894 2894 uprintf("\n");
2895 2895 ds->neednl = 0;
2896 2896 }
2897 2897
2898 2898 /* record actual pages dumped */
2899 2899 dumphdr->dump_npages = ds->npages;
2900 2900
2901 2901 /* platform-specific data */
2902 2902 dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf);
2903 2903
2904 2904 /* note any errors by clearing DF_COMPLETE */
2905 2905 if (dump_ioerr || ds->npages < dumphdr->dump_npages)
2906 2906 dumphdr->dump_flags &= ~DF_COMPLETE;
2907 2907
2908 2908 /* end of stream blocks */
2909 2909 datatag = 0;
2910 2910 dumpvp_write(&datatag, sizeof (datatag));
2911 2911
2912 2912 bzero(&datahdr, sizeof (datahdr));
2913 2913
2914 2914 /* buffer for metrics */
2915 2915 buf = dumpcfg.cbuf[0].buf;
2916 2916 size = MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) -
2917 2917 sizeof (dumpdatahdr_t));
2918 2918
2919 2919 /* finish the kmem intercepts, collect kmem verbose info */
2920 2920 if (panicstr) {
2921 2921 datahdr.dump_metrics = kmem_dump_finish(buf, size);
2922 2922 buf += datahdr.dump_metrics;
2923 2923 size -= datahdr.dump_metrics;
2924 2924 }
2925 2925
2926 2926 /* record in the header whether this is a fault-management panic */
2927 2927 if (panicstr)
2928 2928 dumphdr->dump_fm_panic = is_fm_panic();
2929 2929
2930 2930 /* compression info in data header */
2931 2931 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
2932 2932 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
2933 2933 datahdr.dump_maxcsize = CBUF_SIZE;
2934 2934 datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE;
2935 2935 datahdr.dump_nstreams = dumpcfg.nhelper_used;
2936 2936 datahdr.dump_clevel = dumpcfg.clevel;
2937 2937 #ifdef COLLECT_METRICS
2938 2938 if (dump_metrics_on)
2939 2939 datahdr.dump_metrics += dumpsys_metrics(ds, buf, size);
2940 2940 #endif
2941 2941 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
2942 2942
2943 2943 /*
2944 2944 * Write out the initial and terminal dump headers.
2945 2945 */
2946 2946 dumpbuf.vp_off = dumphdr->dump_start;
2947 2947 dumpvp_write(dumphdr, sizeof (dumphdr_t));
2948 2948 (void) dumpvp_flush();
2949 2949
2950 2950 dumpbuf.vp_limit = dumpvp_size;
2951 2951 dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET;
2952 2952 dumpvp_write(dumphdr, sizeof (dumphdr_t));
2953 2953 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
2954 2954 dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics);
2955 2955
2956 2956 (void) dumpvp_flush();
2957 2957
2958 2958 uprintf("\r%3d%% done: %llu pages dumped, ",
2959 2959 ds->percent_done, (u_longlong_t)ds->npages);
2960 2960
2961 2961 if (dump_ioerr == 0) {
2962 2962 uprintf("dump succeeded\n");
2963 2963 } else {
2964 2964 uprintf("dump failed: error %d\n", dump_ioerr);
2965 2965 #ifdef DEBUG
2966 2966 if (panicstr)
2967 2967 debug_enter("dump failed");
2968 2968 #endif
2969 2969 }
2970 2970
2971 2971 /*
2972 2972 * Write out all undelivered messages. This has to be the *last*
2973 2973 * thing we do because the dump process itself emits messages.
2974 2974 */
2975 2975 if (panicstr) {
2976 2976 dump_summary();
2977 2977 dump_ereports();
2978 2978 dump_messages();
2979 2979 }
2980 2980
2981 2981 delay(2 * hz); /* let people see the 'done' message */
2982 2982 dump_timeleft = 0;
2983 2983 dump_ioerr = 0;
2984 2984
2985 2985 /* restore settings after live dump completes */
2986 2986 if (!panicstr) {
2987 2987 dumpcfg.clevel = save_dump_clevel;
2988 2988
2989 2989 /* release any VCHR open of the dump device */
2990 2990 if (dumpbuf.cdev_vp != NULL) {
2991 2991 (void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0,
2992 2992 kcred, NULL);
2993 2993 VN_RELE(dumpbuf.cdev_vp);
2994 2994 dumpbuf.cdev_vp = NULL;
2995 2995 }
2996 2996 }
2997 2997 }
2998 2998
2999 2999 /*
3000 3000 * This function is called whenever the memory size, as represented
3001 3001 * by the phys_install list, changes.
3002 3002 */
3003 3003 void
3004 3004 dump_resize()
3005 3005 {
3006 3006 mutex_enter(&dump_lock);
3007 3007 dumphdr_init();
3008 3008 dumpbuf_resize();
3009 3009 dump_update_clevel();
3010 3010 mutex_exit(&dump_lock);
3011 3011 }
3012 3012
3013 3013 /*
3014 3014 * This function allows for dynamic resizing of a dump area. It assumes that
3015 3015 * the underlying device has updated its size(9P) accordingly.
3016 3016 */
3017 3017 int
3018 3018 dumpvp_resize()
3019 3019 {
3020 3020 int error;
3021 3021 vattr_t vattr;
3022 3022
3023 3023 mutex_enter(&dump_lock);
3024 3024 vattr.va_mask = AT_SIZE;
3025 3025 if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
3026 3026 mutex_exit(&dump_lock);
3027 3027 return (error);
3028 3028 }
3029 3029
3030 3030 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
3031 3031 mutex_exit(&dump_lock);
3032 3032 return (ENOSPC);
3033 3033 }
3034 3034
3035 3035 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
3036 3036 mutex_exit(&dump_lock);
3037 3037 return (0);
3038 3038 }
3039 3039
3040 3040 int
3041 3041 dump_set_uuid(const char *uuidstr)
3042 3042 {
3043 3043 const char *ptr;
3044 3044 int i;
3045 3045
3046 3046 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
3047 3047 return (EINVAL);
3048 3048
3049 3049 /* uuid_parse is not common code so check manually */
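	/*
	 * Canonical form is 8-4-4-4-12 hex digits, e.g.
	 * "00112233-4455-6677-8899-aabbccddeeff".
	 */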
3050 3050 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
3051 3051 switch (i) {
3052 3052 case 8:
3053 3053 case 13:
3054 3054 case 18:
3055 3055 case 23:
3056 3056 if (*ptr != '-')
3057 3057 return (EINVAL);
3058 3058 break;
3059 3059
3060 3060 default:
3061 3061 if (!isxdigit(*ptr))
3062 3062 return (EINVAL);
3063 3063 break;
3064 3064 }
3065 3065 }
3066 3066
3067 3067 if (dump_osimage_uuid[0] != '\0')
3068 3068 return (EALREADY);
3069 3069
3070 3070 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
3071 3071
3072 3072 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
3073 3073 dump_osimage_uuid);
3074 3074
3075 3075 return (0);
3076 3076 }
3077 3077
3078 3078 const char *
3079 3079 dump_get_uuid(void)
3080 3080 {
3081 3081 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");
3082 3082 }
1654 lines elided