Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/fm.c
+++ new/usr/src/uts/common/os/fm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Fault Management Architecture (FMA) Resource and Protocol Support
27 27 *
28 28 * The routines contained herein provide services to support kernel subsystems
29 29 * in publishing fault management telemetry (see PSARC 2002/412 and 2003/089).
30 30 *
31 31 * Name-Value Pair Lists
32 32 *
33 33 * The embodiment of an FMA protocol element (event, fmri or authority) is a
 * name-value pair list (nvlist_t). FMA-specific nvlist constructor and
35 35 * destructor functions, fm_nvlist_create() and fm_nvlist_destroy(), are used
36 36 * to create an nvpair list using custom allocators. Callers may choose to
37 37 * allocate either from the kernel memory allocator, or from a preallocated
38 38 * buffer, useful in constrained contexts like high-level interrupt routines.
39 39 *
40 40 * Protocol Event and FMRI Construction
41 41 *
42 42 * Convenience routines are provided to construct nvlist events according to
43 43 * the FMA Event Protocol and Naming Schema specification for ereports and
44 44 * FMRIs for the dev, cpu, hc, mem, legacy hc and de schemes.
45 45 *
46 46 * ENA Manipulation
47 47 *
48 48 * Routines to generate ENA formats 0, 1 and 2 are available as well as
49 49 * routines to increment formats 1 and 2. Individual fields within the
50 50 * ENA are extractable via fm_ena_time_get(), fm_ena_id_get(),
51 51 * fm_ena_format_get() and fm_ena_gen_get().
52 52 */
53 53
54 54 #include <sys/types.h>
55 55 #include <sys/time.h>
56 56 #include <sys/sysevent.h>
57 57 #include <sys/sysevent_impl.h>
58 58 #include <sys/nvpair.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/cpuvar.h>
61 61 #include <sys/sysmacros.h>
62 62 #include <sys/systm.h>
63 63 #include <sys/ddifm.h>
64 64 #include <sys/ddifm_impl.h>
65 65 #include <sys/spl.h>
66 66 #include <sys/dumphdr.h>
67 67 #include <sys/compress.h>
68 68 #include <sys/cpuvar.h>
69 69 #include <sys/console.h>
70 70 #include <sys/panic.h>
71 71 #include <sys/kobj.h>
72 72 #include <sys/sunddi.h>
73 73 #include <sys/systeminfo.h>
74 74 #include <sys/sysevent/eventdefs.h>
75 75 #include <sys/fm/util.h>
76 76 #include <sys/fm/protocol.h>
77 77
78 78 /*
79 79 * URL and SUNW-MSG-ID value to display for fm_panic(), defined below. These
80 80 * values must be kept in sync with the FMA source code in usr/src/cmd/fm.
81 81 */
82 82 static const char *fm_url = "http://illumos.org/msg";
83 83 static const char *fm_msgid = "SUNOS-8000-0G";
84 84 static char *volatile fm_panicstr = NULL;
85 85
86 86 errorq_t *ereport_errorq;
87 87 void *ereport_dumpbuf;
88 88 size_t ereport_dumplen;
89 89
90 90 static uint_t ereport_chanlen = ERPT_EVCH_MAX;
91 91 static evchan_t *ereport_chan = NULL;
92 92 static ulong_t ereport_qlen = 0;
93 93 static size_t ereport_size = 0;
94 94 static int ereport_cols = 80;
95 95
96 96 extern void fastreboot_disable_highpil(void);
97 97
98 98 /*
99 99 * Common fault management kstats to record ereport generation
100 100 * failures
101 101 */
102 102
103 103 struct erpt_kstat {
104 104 kstat_named_t erpt_dropped; /* num erpts dropped on post */
105 105 kstat_named_t erpt_set_failed; /* num erpt set failures */
106 106 kstat_named_t fmri_set_failed; /* num fmri set failures */
107 107 kstat_named_t payload_set_failed; /* num payload set failures */
108 108 };
109 109
110 110 static struct erpt_kstat erpt_kstat_data = {
111 111 { "erpt-dropped", KSTAT_DATA_UINT64 },
112 112 { "erpt-set-failed", KSTAT_DATA_UINT64 },
113 113 { "fmri-set-failed", KSTAT_DATA_UINT64 },
114 114 { "payload-set-failed", KSTAT_DATA_UINT64 }
115 115 };
116 116
117 117 /*ARGSUSED*/
118 118 static void
119 119 fm_drain(void *private, void *data, errorq_elem_t *eep)
120 120 {
121 121 nvlist_t *nvl = errorq_elem_nvl(ereport_errorq, eep);
122 122
123 123 if (!panicstr)
124 124 (void) fm_ereport_post(nvl, EVCH_TRYHARD);
125 125 else
126 126 fm_nvprint(nvl);
127 127 }
128 128
/*
 * Initialize the FMA transport state: bind the ereport sysevent channel,
 * create the in-flight ereport error queue, allocate the dump staging
 * buffer, and install the erpt kstats.
 */
void
fm_init(void)
{
	kstat_t *ksp;

	/* Bind the ereport error channel, creating it if necessary. */
	(void) sysevent_evc_bind(FM_ERROR_CHAN,
	    &ereport_chan, EVCH_CREAT | EVCH_HOLD_PEND);

	/* Bound the number of events held in transit on the channel. */
	(void) sysevent_evc_control(ereport_chan,
	    EVCH_SET_CHAN_LEN, &ereport_chanlen);

	/* Tunables default to values scaled for this machine if unset. */
	if (ereport_qlen == 0)
		ereport_qlen = ERPT_MAX_ERRS * MAX(max_ncpus, 4);

	if (ereport_size == 0)
		ereport_size = ERPT_DATA_SZ;

	/*
	 * The vital ereport queue must exist for error handling to work at
	 * all, so failure to create it is fatal.
	 */
	ereport_errorq = errorq_nvcreate("fm_ereport_queue",
	    (errorq_func_t)fm_drain, NULL, ereport_qlen, ereport_size,
	    FM_ERR_PIL, ERRORQ_VITAL);
	if (ereport_errorq == NULL)
		panic("failed to create required ereport error queue");

	ereport_dumpbuf = kmem_alloc(ereport_size, KM_SLEEP);
	ereport_dumplen = ereport_size;

	/* Initialize ereport allocation and generation kstats */
	ksp = kstat_create("unix", 0, "fm", "misc", KSTAT_TYPE_NAMED,
	    sizeof (struct erpt_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp != NULL) {
		ksp->ks_data = &erpt_kstat_data;
		kstat_install(ksp);
	} else {
		/* Non-fatal: we can operate without the kstats. */
		cmn_err(CE_NOTE, "failed to create fm/misc kstat\n");

	}
}
168 168
169 169 /*
170 170 * Formatting utility function for fm_nvprintr. We attempt to wrap chunks of
171 171 * output so they aren't split across console lines, and return the end column.
172 172 */
/*PRINTFLIKE4*/
static int
fm_printf(int depth, int c, int cols, const char *format, ...)
{
	va_list ap;
	int width;
	char c1;

	/*
	 * First pass: measure the formatted width without really emitting
	 * anything.  vsnprintf() returns the full required width even
	 * though only a single byte of storage (c1) is supplied.
	 */
	va_start(ap, format);
	width = vsnprintf(&c1, sizeof (c1), format, ap);
	va_end(ap);

	/*
	 * If the chunk would run past the requested width, wrap to a new
	 * console line first, indenting one space for nested output
	 * (depth > 0) that does not already begin with a space.
	 */
	if (c + width >= cols) {
		console_printf("\n\r");
		c = 0;
		if (format[0] != ' ' && depth > 0) {
			console_printf(" ");
			c++;
		}
	}

	/* Second pass: actually print the chunk. */
	va_start(ap, format);
	console_vprintf(format, ap);
	va_end(ap);

	/* Return the column we ended in, modulo the console width. */
	return ((c + width) % cols);
}
200 200
201 201 /*
202 202 * Recursively print a nvlist in the specified column width and return the
203 203 * column we end up in. This function is called recursively by fm_nvprint(),
204 204 * below. We generically format the entire nvpair using hexadecimal
205 205 * integers and strings, and elide any integer arrays. Arrays are basically
206 206 * used for cache dumps right now, so we suppress them so as not to overwhelm
207 207 * the amount of console output we produce at panic time. This can be further
208 208 * enhanced as FMA technology grows based upon the needs of consumers. All
209 209 * FMA telemetry is logged using the dump device transport, so the console
210 210 * output serves only as a fallback in case this procedure is unsuccessful.
211 211 */
static int
fm_nvprintr(nvlist_t *nvl, int d, int c, int cols)
{
	nvpair_t *nvp;

	/*
	 * Walk each pair in nvl; d is the current nesting depth, c is the
	 * current console column, and cols is the wrap width.  The updated
	 * column is returned so the caller can continue on the same line.
	 */
	for (nvp = nvlist_next_nvpair(nvl, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {

		data_type_t type = nvpair_type(nvp);
		const char *name = nvpair_name(nvp);

		boolean_t b;
		uint8_t i8;
		uint16_t i16;
		uint32_t i32;
		uint64_t i64;
		char *str;
		nvlist_t *cnv;

		if (strcmp(name, FM_CLASS) == 0)
			continue; /* already printed by caller */

		c = fm_printf(d, c, cols, " %s=", name);

		/* Scalars print in hex; nvlist members recurse. */
		switch (type) {
		case DATA_TYPE_BOOLEAN:
			c = fm_printf(d + 1, c, cols, " 1");
			break;

		case DATA_TYPE_BOOLEAN_VALUE:
			(void) nvpair_value_boolean_value(nvp, &b);
			c = fm_printf(d + 1, c, cols, b ? "1" : "0");
			break;

		case DATA_TYPE_BYTE:
			(void) nvpair_value_byte(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT8:
			(void) nvpair_value_int8(nvp, (void *)&i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_UINT8:
			(void) nvpair_value_uint8(nvp, &i8);
			c = fm_printf(d + 1, c, cols, "%x", i8);
			break;

		case DATA_TYPE_INT16:
			(void) nvpair_value_int16(nvp, (void *)&i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_UINT16:
			(void) nvpair_value_uint16(nvp, &i16);
			c = fm_printf(d + 1, c, cols, "%x", i16);
			break;

		case DATA_TYPE_INT32:
			(void) nvpair_value_int32(nvp, (void *)&i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_UINT32:
			(void) nvpair_value_uint32(nvp, &i32);
			c = fm_printf(d + 1, c, cols, "%x", i32);
			break;

		case DATA_TYPE_INT64:
			(void) nvpair_value_int64(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_UINT64:
			(void) nvpair_value_uint64(nvp, &i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_HRTIME:
			(void) nvpair_value_hrtime(nvp, (void *)&i64);
			c = fm_printf(d + 1, c, cols, "%llx",
			    (u_longlong_t)i64);
			break;

		case DATA_TYPE_STRING:
			(void) nvpair_value_string(nvp, &str);
			c = fm_printf(d + 1, c, cols, "\"%s\"",
			    str ? str : "<NULL>");
			break;

		case DATA_TYPE_NVLIST:
			/* Embedded nvlist: recurse one level deeper. */
			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist(nvp, &cnv);
			c = fm_nvprintr(cnv, d + 1, c, cols);
			c = fm_printf(d + 1, c, cols, " ]");
			break;

		case DATA_TYPE_NVLIST_ARRAY: {
			nvlist_t **val;
			uint_t i, nelem;

			c = fm_printf(d + 1, c, cols, "[");
			(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
			for (i = 0; i < nelem; i++) {
				c = fm_nvprintr(val[i], d + 1, c, cols);
			}
			c = fm_printf(d + 1, c, cols, " ]");
		}
		break;

		/*
		 * Scalar arrays are elided to limit console output at
		 * panic time (see the block comment above this function).
		 */
		case DATA_TYPE_BOOLEAN_ARRAY:
		case DATA_TYPE_BYTE_ARRAY:
		case DATA_TYPE_INT8_ARRAY:
		case DATA_TYPE_UINT8_ARRAY:
		case DATA_TYPE_INT16_ARRAY:
		case DATA_TYPE_UINT16_ARRAY:
		case DATA_TYPE_INT32_ARRAY:
		case DATA_TYPE_UINT32_ARRAY:
		case DATA_TYPE_INT64_ARRAY:
		case DATA_TYPE_UINT64_ARRAY:
		case DATA_TYPE_STRING_ARRAY:
			c = fm_printf(d + 1, c, cols, "[...]");
			break;
		case DATA_TYPE_UNKNOWN:
			c = fm_printf(d + 1, c, cols, "<unknown>");
			break;
		}
	}

	return (c);
}
346 346
347 347 void
348 348 fm_nvprint(nvlist_t *nvl)
349 349 {
350 350 char *class;
351 351 int c = 0;
352 352
353 353 console_printf("\r");
354 354
355 355 if (nvlist_lookup_string(nvl, FM_CLASS, &class) == 0)
356 356 c = fm_printf(0, c, ereport_cols, "%s", class);
357 357
358 358 if (fm_nvprintr(nvl, 0, c, ereport_cols) != 0)
359 359 console_printf("\n");
360 360
361 361 console_printf("\n");
362 362 }
363 363
364 364 /*
365 365 * Wrapper for panic() that first produces an FMA-style message for admins.
366 366 * Normally such messages are generated by fmd(1M)'s syslog-msgs agent: this
367 367 * is the one exception to that rule and the only error that gets messaged.
368 368 * This function is intended for use by subsystems that have detected a fatal
369 369 * error and enqueued appropriate ereports and wish to then force a panic.
370 370 */
/*PRINTFLIKE1*/
void
fm_panic(const char *format, ...)
{
	va_list ap;

	/*
	 * Record the message format as the FMA panic string; the compare-
	 * and-swap means the first caller wins if fm_panic() races with
	 * itself.  fm_banner() and is_fm_panic() key off this value later.
	 */
	(void) atomic_cas_ptr((void *)&fm_panicstr, NULL, (void *)format);
#if defined(__i386) || defined(__amd64)
	fastreboot_disable_highpil();
#endif /* __i386 || __amd64 */
	va_start(ap, format);
	vpanic(format, ap);
	va_end(ap);
}
385 385
386 386 /*
387 387 * Simply tell the caller if fm_panicstr is set, ie. an fma event has
388 388 * caused the panic. If so, something other than the default panic
389 389 * diagnosis method will diagnose the cause of the panic.
390 390 */
391 391 int
392 392 is_fm_panic()
393 393 {
394 394 if (fm_panicstr)
395 395 return (1);
396 396 else
397 397 return (0);
398 398 }
399 399
400 400 /*
401 401 * Print any appropriate FMA banner message before the panic message. This
402 402 * function is called by panicsys() and prints the message for fm_panic().
403 403 * We print the message here so that it comes after the system is quiesced.
404 404 * A one-line summary is recorded in the log only (cmn_err(9F) with "!" prefix).
405 405 * The rest of the message is for the console only and not needed in the log,
406 406 * so it is printed using console_printf(). We break it up into multiple
407 407 * chunks so as to avoid overflowing any small legacy prom_printf() buffers.
408 408 */
void
fm_banner(void)
{
	timespec_t tod;
	hrtime_t now;

	if (!fm_panicstr)
		return; /* panic was not initiated by fm_panic(); do nothing */

	/*
	 * Prefer the timestamps captured at panic time so the banner
	 * reflects the moment of failure; otherwise sample current time
	 * (waitfree, since normal clock interlocks may be unavailable).
	 */
	if (panicstr) {
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/* One-line summary for the system log only ("!" prefix). */
	cmn_err(CE_NOTE, "!SUNW-MSG-ID: %s, "
	    "TYPE: Error, VER: 1, SEVERITY: Major\n", fm_msgid);

	/*
	 * The remainder goes to the console only, broken into several
	 * console_printf() calls to avoid overflowing small legacy
	 * prom_printf() buffers (see function block comment).
	 */
	console_printf(
"\n\rSUNW-MSG-ID: %s, TYPE: Error, VER: 1, SEVERITY: Major\n"
"EVENT-TIME: 0x%lx.0x%lx (0x%llx)\n",
	    fm_msgid, tod.tv_sec, tod.tv_nsec, (u_longlong_t)now);

	console_printf(
"PLATFORM: %s, CSN: -, HOSTNAME: %s\n"
"SOURCE: %s, REV: %s %s\n",
	    platform, utsname.nodename, utsname.sysname,
	    utsname.release, utsname.version);

	console_printf(
"DESC: Errors have been detected that require a reboot to ensure system\n"
"integrity.  See %s/%s for more information.\n",
	    fm_url, fm_msgid);

	console_printf(
"AUTO-RESPONSE: Solaris will attempt to save and diagnose the error telemetry\n"
"IMPACT: The system will sync files, save a crash dump if needed, and reboot\n"
"REC-ACTION: Save the error summary below in case telemetry cannot be saved\n");

	console_printf("\n");
}
452 452
453 453 /*
454 454 * Utility function to write all of the pending ereports to the dump device.
455 455 * This function is called at either normal reboot or panic time, and simply
456 456 * iterates over the in-transit messages in the ereport sysevent channel.
457 457 */
void
fm_ereport_dump(void)
{
	evchanq_t *chq;
	sysevent_t *sep;
	erpt_dump_t ed;

	timespec_t tod;
	hrtime_t now;
	char *buf;
	size_t len;

	if (panicstr) {
		/* Use the timestamps captured at panic time. */
		tod = panic_hrestime;
		now = panic_hrtime;
	} else {
		/*
		 * Normal reboot: first flush any ereports still sitting in
		 * the error queue out to the channel so the walk below
		 * sees them.
		 */
		if (ereport_errorq != NULL)
			errorq_drain(ereport_errorq);
		gethrestime(&tod);
		now = gethrtime_waitfree();
	}

	/*
	 * In the panic case, sysevent_evc_walk_init() will return NULL.
	 */
	if ((chq = sysevent_evc_walk_init(ereport_chan, NULL)) == NULL &&
	    !panicstr)
		return; /* event channel isn't initialized yet */

	/*
	 * Write each in-transit event to the dump device as an erpt_dump_t
	 * header (magic, checksum, size, timestamps) followed by the raw
	 * event attribute data.
	 */
	while ((sep = sysevent_evc_walk_step(chq)) != NULL) {
		if ((buf = sysevent_evc_event_attr(sep, &len)) == NULL)
			break;

		ed.ed_magic = ERPT_MAGIC;
		ed.ed_chksum = checksum32(buf, len);
		ed.ed_size = (uint32_t)len;
		ed.ed_pad = 0;
		ed.ed_hrt_nsec = SE_TIME(sep);
		ed.ed_hrt_base = now;
		ed.ed_tod_base.sec = tod.tv_sec;
		ed.ed_tod_base.nsec = tod.tv_nsec;

		dumpvp_write(&ed, sizeof (ed));
		dumpvp_write(buf, len);
	}

	sysevent_evc_walk_fini(chq);
}
506 506
507 507 /*
508 508 * Post an error report (ereport) to the sysevent error channel. The error
509 509 * channel must be established with a prior call to sysevent_evc_create()
↓ open down ↓ |
509 lines elided |
↑ open up ↑ |
510 510 * before publication may occur.
511 511 */
void
fm_ereport_post(nvlist_t *ereport, int evc_flag)
{
	size_t nvl_size = 0;
	evchan_t *error_chan;

	/*
	 * Drop (and count) ereports that failed to encode (size 0) or
	 * that are too large to fit in a dump record.
	 */
	(void) nvlist_size(ereport, &nvl_size, NV_ENCODE_NATIVE);
	if (nvl_size > ERPT_DATA_SZ || nvl_size == 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	/* Bind to the error channel for the duration of the publish. */
	if (sysevent_evc_bind(FM_ERROR_CHAN, &error_chan,
	    EVCH_CREAT|EVCH_HOLD_PEND) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		return;
	}

	if (sysevent_evc_publish(error_chan, EC_FM, ESC_FM_ERROR,
	    SUNW_VENDOR, FM_PUB, ereport, evc_flag) != 0) {
		/* Publish failed: count the drop and release the binding. */
		atomic_inc_64(&erpt_kstat_data.erpt_dropped.value.ui64);
		(void) sysevent_evc_unbind(error_chan);
		return;
	}
	(void) sysevent_evc_unbind(error_chan);
}
538 538
539 539 /*
 * Wrappers for FM nvlist allocators
541 541 */
/*
 * Default FM nvlist allocator: sleeping, zero-filled kernel memory.
 * The nva handle is unused by this implementation.
 */
/* ARGSUSED */
static void *
i_fm_alloc(nv_alloc_t *nva, size_t size)
{
	return (kmem_zalloc(size, KM_SLEEP));
}
548 548
/*
 * Free counterpart to i_fm_alloc().  The nva handle is unused.
 */
/* ARGSUSED */
static void
i_fm_free(nv_alloc_t *nva, void *buf, size_t size)
{
	kmem_free(buf, size);
}
555 555
/*
 * nv_alloc operations vector for the default kmem-backed FM nvlist
 * allocator; only the alloc and free entry points are needed.
 */
const nv_alloc_ops_t fm_mem_alloc_ops = {
	NULL,
	NULL,
	i_fm_alloc,	/* allocate */
	i_fm_free,	/* free */
	NULL
};
563 563
564 564 /*
565 565 * Create and initialize a new nv_alloc_t for a fixed buffer, buf. A pointer
566 566 * to the newly allocated nv_alloc_t structure is returned upon success or NULL
567 567 * is returned to indicate that the nv_alloc structure could not be created.
568 568 */
569 569 nv_alloc_t *
570 570 fm_nva_xcreate(char *buf, size_t bufsz)
571 571 {
572 572 nv_alloc_t *nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);
573 573
574 574 if (bufsz == 0 || nv_alloc_init(nvhdl, nv_fixed_ops, buf, bufsz) != 0) {
575 575 kmem_free(nvhdl, sizeof (nv_alloc_t));
576 576 return (NULL);
577 577 }
578 578
579 579 return (nvhdl);
580 580 }
581 581
582 582 /*
583 583 * Destroy a previously allocated nv_alloc structure. The fixed buffer
584 584 * associated with nva must be freed by the caller.
585 585 */
/*
 * Tear down an nv_alloc handle created by fm_nva_xcreate().  The fixed
 * buffer itself belongs to the caller and is not freed here.
 */
void
fm_nva_xdestroy(nv_alloc_t *nva)
{
	nv_alloc_fini(nva);
	kmem_free(nva, sizeof (nv_alloc_t));
}
592 592
593 593 /*
594 594 * Create a new nv list. A pointer to a new nv list structure is returned
595 595 * upon success or NULL is returned to indicate that the structure could
596 596 * not be created. The newly created nv list is created and managed by the
597 597 * operations installed in nva. If nva is NULL, the default FMA nva
598 598 * operations are installed and used.
599 599 *
600 600 * When called from the kernel and nva == NULL, this function must be called
601 601 * from passive kernel context with no locks held that can prevent a
602 602 * sleeping memory allocation from occurring. Otherwise, this function may
603 603 * be called from other kernel contexts as long a valid nva created via
604 604 * fm_nva_create() is supplied.
605 605 */
nvlist_t *
fm_nvlist_create(nv_alloc_t *nva)
{
	int hdl_alloced = 0;	/* nonzero if we own nvhdl and must free it */
	nvlist_t *nvl;
	nv_alloc_t *nvhdl;

	if (nva == NULL) {
		/* No caller-supplied allocator: build a default kmem one. */
		nvhdl = kmem_zalloc(sizeof (nv_alloc_t), KM_SLEEP);

		if (nv_alloc_init(nvhdl, &fm_mem_alloc_ops, NULL, 0) != 0) {
			kmem_free(nvhdl, sizeof (nv_alloc_t));
			return (NULL);
		}
		hdl_alloced = 1;
	} else {
		nvhdl = nva;
	}

	if (nvlist_xalloc(&nvl, NV_UNIQUE_NAME, nvhdl) != 0) {
		/* Only tear down the handle if we created it above. */
		if (hdl_alloced) {
			nv_alloc_fini(nvhdl);
			kmem_free(nvhdl, sizeof (nv_alloc_t));
		}
		return (NULL);
	}

	return (nvl);
}
635 635
636 636 /*
637 637 * Destroy a previously allocated nvlist structure. flag indicates whether
638 638 * or not the associated nva structure should be freed (FM_NVA_FREE) or
639 639 * retained (FM_NVA_RETAIN). Retaining the nv alloc structure allows
640 640 * it to be re-used for future nvlist creation operations.
641 641 */
642 642 void
643 643 fm_nvlist_destroy(nvlist_t *nvl, int flag)
644 644 {
645 645 nv_alloc_t *nva = nvlist_lookup_nv_alloc(nvl);
646 646
647 647 nvlist_free(nvl);
648 648
649 649 if (nva != NULL) {
650 650 if (flag == FM_NVA_FREE)
651 651 fm_nva_xdestroy(nva);
652 652 }
653 653 }
654 654
/*
 * Add a NULL-name-terminated sequence of (name, type, value) varargs
 * tuples to the payload nvlist; array types carry an element count
 * before the array pointer.  Returns 0 on success, or the first nonzero
 * error from an nvlist_add_*() routine (EINVAL for an unknown type),
 * at which point processing stops.
 */
int
i_fm_payload_set(nvlist_t *payload, const char *name, va_list ap)
{
	int nelem, ret = 0;
	data_type_t type;

	while (ret == 0 && name != NULL) {
		type = va_arg(ap, data_type_t);
		switch (type) {
		case DATA_TYPE_BYTE:
			ret = nvlist_add_byte(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_BYTE_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_byte_array(payload, name,
			    va_arg(ap, uchar_t *), nelem);
			break;
		case DATA_TYPE_BOOLEAN_VALUE:
			ret = nvlist_add_boolean_value(payload, name,
			    va_arg(ap, boolean_t));
			break;
		case DATA_TYPE_BOOLEAN_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_boolean_array(payload, name,
			    va_arg(ap, boolean_t *), nelem);
			break;
		case DATA_TYPE_INT8:
			/* small ints promote to int through varargs */
			ret = nvlist_add_int8(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int8_array(payload, name,
			    va_arg(ap, int8_t *), nelem);
			break;
		case DATA_TYPE_UINT8:
			ret = nvlist_add_uint8(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT8_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint8_array(payload, name,
			    va_arg(ap, uint8_t *), nelem);
			break;
		case DATA_TYPE_INT16:
			ret = nvlist_add_int16(payload, name,
			    va_arg(ap, int));
			break;
		case DATA_TYPE_INT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int16_array(payload, name,
			    va_arg(ap, int16_t *), nelem);
			break;
		case DATA_TYPE_UINT16:
			ret = nvlist_add_uint16(payload, name,
			    va_arg(ap, uint_t));
			break;
		case DATA_TYPE_UINT16_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint16_array(payload, name,
			    va_arg(ap, uint16_t *), nelem);
			break;
		case DATA_TYPE_INT32:
			ret = nvlist_add_int32(payload, name,
			    va_arg(ap, int32_t));
			break;
		case DATA_TYPE_INT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int32_array(payload, name,
			    va_arg(ap, int32_t *), nelem);
			break;
		case DATA_TYPE_UINT32:
			ret = nvlist_add_uint32(payload, name,
			    va_arg(ap, uint32_t));
			break;
		case DATA_TYPE_UINT32_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint32_array(payload, name,
			    va_arg(ap, uint32_t *), nelem);
			break;
		case DATA_TYPE_INT64:
			ret = nvlist_add_int64(payload, name,
			    va_arg(ap, int64_t));
			break;
		case DATA_TYPE_INT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_int64_array(payload, name,
			    va_arg(ap, int64_t *), nelem);
			break;
		case DATA_TYPE_UINT64:
			ret = nvlist_add_uint64(payload, name,
			    va_arg(ap, uint64_t));
			break;
		case DATA_TYPE_UINT64_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_uint64_array(payload, name,
			    va_arg(ap, uint64_t *), nelem);
			break;
		case DATA_TYPE_STRING:
			ret = nvlist_add_string(payload, name,
			    va_arg(ap, char *));
			break;
		case DATA_TYPE_STRING_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_string_array(payload, name,
			    va_arg(ap, char **), nelem);
			break;
		case DATA_TYPE_NVLIST:
			ret = nvlist_add_nvlist(payload, name,
			    va_arg(ap, nvlist_t *));
			break;
		case DATA_TYPE_NVLIST_ARRAY:
			nelem = va_arg(ap, int);
			ret = nvlist_add_nvlist_array(payload, name,
			    va_arg(ap, nvlist_t **), nelem);
			break;
		default:
			ret = EINVAL;
		}

		/* Advance to the next tuple's name (NULL terminates). */
		name = va_arg(ap, char *);
	}
	return (ret);
}
780 780
781 781 void
782 782 fm_payload_set(nvlist_t *payload, ...)
783 783 {
↓ open down ↓ |
241 lines elided |
↑ open up ↑ |
784 784 int ret;
785 785 const char *name;
786 786 va_list ap;
787 787
788 788 va_start(ap, payload);
789 789 name = va_arg(ap, char *);
790 790 ret = i_fm_payload_set(payload, name, ap);
791 791 va_end(ap);
792 792
793 793 if (ret)
794 - atomic_add_64(
795 - &erpt_kstat_data.payload_set_failed.value.ui64, 1);
794 + atomic_inc_64(&erpt_kstat_data.payload_set_failed.value.ui64);
796 795 }
797 796
798 797 /*
799 798 * Set-up and validate the members of an ereport event according to:
800 799 *
801 800 * Member name Type Value
802 801 * ====================================================
803 802 * class string ereport
804 803 * version uint8_t 0
805 804 * ena uint64_t <ena>
806 805 * detector nvlist_t <detector>
807 806 * ereport-payload nvlist_t <var args>
808 807 *
809 808 * We don't actually add a 'version' member to the payload. Really,
810 809 * the version quoted to us by our caller is that of the category 1
811 810 * "ereport" event class (and we require FM_EREPORT_VERS0) but
812 811 * the payload version of the actual leaf class event under construction
813 812 * may be something else. Callers should supply a version in the varargs,
814 813 * or (better) we could take two version arguments - one for the
815 814 * ereport category 1 classification (expect FM_EREPORT_VERS0) and one
816 815 * for the leaf class.
817 816 */
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
void
fm_ereport_set(nvlist_t *ereport, int version, const char *erpt_class,
    uint64_t ena, const nvlist_t *detector, ...)
{
	char ereport_class[FM_MAX_CLASS];
	const char *name;
	va_list ap;
	int ret;

	/* Only the category-1 ereport version 0 is accepted. */
	if (version != FM_EREPORT_VERS0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	/* Build the full leaf class name: "ereport.<erpt_class>". */
	(void) snprintf(ereport_class, FM_MAX_CLASS, "%s.%s",
	    FM_EREPORT_CLASS, erpt_class);
	if (nvlist_add_string(ereport, FM_CLASS, ereport_class) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
		return;
	}

	/*
	 * NOTE(review): unlike the class failure above, ena/detector
	 * failures below bump the kstat but do not abort construction.
	 */
	if (nvlist_add_uint64(ereport, FM_EREPORT_ENA, ena)) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	if (nvlist_add_nvlist(ereport, FM_EREPORT_DETECTOR,
	    (nvlist_t *)detector) != 0) {
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
	}

	/* Remaining varargs are (name, type, value) payload tuples. */
	va_start(ap, detector);
	name = va_arg(ap, const char *);
	ret = i_fm_payload_set(ereport, name, ap);
	va_end(ap);

	if (ret)
		atomic_inc_64(&erpt_kstat_data.erpt_set_failed.value.ui64);
}
856 855
857 856 /*
858 857 * Set-up and validate the members of an hc fmri according to;
859 858 *
860 859 * Member name Type Value
861 860 * ===================================================
862 861 * version uint8_t 0
863 862 * auth nvlist_t <auth>
864 863 * hc-name string <name>
865 864 * hc-id string <id>
866 865 *
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
867 866 * Note that auth and hc-id are optional members.
868 867 */
869 868
870 869 #define HC_MAXPAIRS 20
871 870 #define HC_MAXNAMELEN 50
872 871
/*
 * Add the members common to all hc-scheme FMRIs (version, scheme name,
 * optional authority).  Returns 1 on success; on any failure the
 * fmri-set-failed kstat is bumped and 0 is returned.
 */
static int
fm_fmri_hc_set_common(nvlist_t *fmri, int version, const nvlist_t *auth)
{
	if (version != FM_HC_SCHEME_VERSION) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0 ||
	    nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	/* The authority nvlist is an optional member. */
	if (auth != NULL && nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
	    (nvlist_t *)auth) != 0) {
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
		return (0);
	}

	return (1);
}
895 894
/*
 * Build an hc-scheme FMRI in fmri from npairs (name, id) varargs pairs,
 * with an optional authority nvlist and optional hc-specific nvlist.
 */
void
fm_fmri_hc_set(nvlist_t *fmri, int version, const nvlist_t *auth,
    nvlist_t *snvl, int npairs, ...)
{
	nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
	nvlist_t *pairs[HC_MAXPAIRS];
	va_list ap;
	int i;

	if (!fm_fmri_hc_set_common(fmri, version, auth))
		return;

	/* Silently cap the pair list at HC_MAXPAIRS entries. */
	npairs = MIN(npairs, HC_MAXPAIRS);

	va_start(ap, npairs);
	for (i = 0; i < npairs; i++) {
		const char *name = va_arg(ap, const char *);
		uint32_t id = va_arg(ap, uint32_t);
		char idstr[11];	/* max uint32 is 10 digits, plus NUL */

		(void) snprintf(idstr, sizeof (idstr), "%u", id);

		/* Each pair becomes its own { hc-name, hc-id } nvlist. */
		pairs[i] = fm_nvlist_create(nva);
		if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
		    nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
	va_end(ap);

	if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs, npairs) != 0)
		atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);

	/* The pair nvlists were copied into fmri; release the originals. */
	for (i = 0; i < npairs; i++)
		fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);

	if (snvl != NULL) {
		if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
			atomic_inc_64(
			    &erpt_kstat_data.fmri_set_failed.value.ui64);
		}
	}
}
940 939
941 940 /*
942 941  * Set-up and validate the members of a dev fmri according to:
943 942 *
944 943 * Member name Type Value
945 944 * ====================================================
946 945 * version uint8_t 0
947 946 * auth nvlist_t <auth>
948 947 * devpath string <devpath>
949 948 * [devid] string <devid>
950 949 * [target-port-l0id] string <target-port-lun0-id>
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
951 950 *
952 951 * Note that auth and devid are optional members.
953 952 */
954 953 void
955 954 fm_fmri_dev_set(nvlist_t *fmri_dev, int version, const nvlist_t *auth,
956 955 const char *devpath, const char *devid, const char *tpl0)
957 956 {
958 957 int err = 0;
959 958
960 959 if (version != DEV_SCHEME_VERSION0) {
961 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
960 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
962 961 return;
963 962 }
964 963
965 964 err |= nvlist_add_uint8(fmri_dev, FM_VERSION, version);
966 965 err |= nvlist_add_string(fmri_dev, FM_FMRI_SCHEME, FM_FMRI_SCHEME_DEV);
967 966
968 967 if (auth != NULL) {
969 968 err |= nvlist_add_nvlist(fmri_dev, FM_FMRI_AUTHORITY,
970 969 (nvlist_t *)auth);
971 970 }
972 971
973 972 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_PATH, devpath);
974 973
975 974 if (devid != NULL)
976 975 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_ID, devid);
977 976
978 977 if (tpl0 != NULL)
979 978 err |= nvlist_add_string(fmri_dev, FM_FMRI_DEV_TGTPTLUN0, tpl0);
980 979
981 980 if (err)
982 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
981 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
983 982
984 983 }
985 984
986 985 /*
987 986  * Set-up and validate the members of a cpu fmri according to:
988 987 *
989 988 * Member name Type Value
990 989 * ====================================================
991 990 * version uint8_t 0
992 991 * auth nvlist_t <auth>
993 992 * cpuid uint32_t <cpu_id>
994 993 * cpumask uint8_t <cpu_mask>
995 994 * serial uint64_t <serial_id>
996 995 *
↓ open down ↓ |
4 lines elided |
↑ open up ↑ |
997 996 * Note that auth, cpumask, serial are optional members.
998 997 *
999 998 */
1000 999 void
1001 1000 fm_fmri_cpu_set(nvlist_t *fmri_cpu, int version, const nvlist_t *auth,
1002 1001 uint32_t cpu_id, uint8_t *cpu_maskp, const char *serial_idp)
1003 1002 {
1004 1003 uint64_t *failedp = &erpt_kstat_data.fmri_set_failed.value.ui64;
1005 1004
1006 1005 if (version < CPU_SCHEME_VERSION1) {
1007 - atomic_add_64(failedp, 1);
1006 + atomic_inc_64(failedp);
1008 1007 return;
1009 1008 }
1010 1009
1011 1010 if (nvlist_add_uint8(fmri_cpu, FM_VERSION, version) != 0) {
1012 - atomic_add_64(failedp, 1);
1011 + atomic_inc_64(failedp);
1013 1012 return;
1014 1013 }
1015 1014
1016 1015 if (nvlist_add_string(fmri_cpu, FM_FMRI_SCHEME,
1017 1016 FM_FMRI_SCHEME_CPU) != 0) {
1018 - atomic_add_64(failedp, 1);
1017 + atomic_inc_64(failedp);
1019 1018 return;
1020 1019 }
1021 1020
1022 1021 if (auth != NULL && nvlist_add_nvlist(fmri_cpu, FM_FMRI_AUTHORITY,
1023 1022 (nvlist_t *)auth) != 0)
1024 - atomic_add_64(failedp, 1);
1023 + atomic_inc_64(failedp);
1025 1024
1026 1025 if (nvlist_add_uint32(fmri_cpu, FM_FMRI_CPU_ID, cpu_id) != 0)
1027 - atomic_add_64(failedp, 1);
1026 + atomic_inc_64(failedp);
1028 1027
1029 1028 if (cpu_maskp != NULL && nvlist_add_uint8(fmri_cpu, FM_FMRI_CPU_MASK,
1030 1029 *cpu_maskp) != 0)
1031 - atomic_add_64(failedp, 1);
1030 + atomic_inc_64(failedp);
1032 1031
1033 1032 if (serial_idp == NULL || nvlist_add_string(fmri_cpu,
1034 1033 FM_FMRI_CPU_SERIAL_ID, (char *)serial_idp) != 0)
1035 - atomic_add_64(failedp, 1);
1034 + atomic_inc_64(failedp);
1036 1035 }
1037 1036
1038 1037 /*
1039 1038 * Set-up and validate the members of a mem according to:
1040 1039 *
1041 1040 * Member name Type Value
1042 1041 * ====================================================
1043 1042 * version uint8_t 0
1044 1043 * auth nvlist_t <auth> [optional]
1045 1044 * unum string <unum>
1046 1045 * serial string <serial> [optional*]
1047 1046 * offset uint64_t <offset> [optional]
1048 1047 *
1049 1048 * * serial is required if offset is present
1050 1049 */
1051 1050 void
1052 1051 fm_fmri_mem_set(nvlist_t *fmri, int version, const nvlist_t *auth,
1053 1052 const char *unum, const char *serial, uint64_t offset)
1054 1053 {
1055 1054 if (version != MEM_SCHEME_VERSION0) {
1056 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1055 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1057 1056 return;
1058 1057 }
1059 1058
1060 1059 if (!serial && (offset != (uint64_t)-1)) {
1061 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1060 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1062 1061 return;
1063 1062 }
1064 1063
1065 1064 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1066 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1065 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1067 1066 return;
1068 1067 }
1069 1068
1070 1069 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_MEM) != 0) {
1071 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1070 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1072 1071 return;
1073 1072 }
1074 1073
1075 1074 if (auth != NULL) {
1076 1075 if (nvlist_add_nvlist(fmri, FM_FMRI_AUTHORITY,
1077 1076 (nvlist_t *)auth) != 0) {
1078 - atomic_add_64(
1079 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1077 + atomic_inc_64(
1078 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1080 1079 }
1081 1080 }
1082 1081
1083 1082 if (nvlist_add_string(fmri, FM_FMRI_MEM_UNUM, unum) != 0) {
1084 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1083 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1085 1084 }
1086 1085
1087 1086 if (serial != NULL) {
1088 1087 if (nvlist_add_string_array(fmri, FM_FMRI_MEM_SERIAL_ID,
1089 1088 (char **)&serial, 1) != 0) {
1090 - atomic_add_64(
1091 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1089 + atomic_inc_64(
1090 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1092 1091 }
1093 - if (offset != (uint64_t)-1) {
1094 - if (nvlist_add_uint64(fmri, FM_FMRI_MEM_OFFSET,
1095 - offset) != 0) {
1096 - atomic_add_64(&erpt_kstat_data.
1097 - fmri_set_failed.value.ui64, 1);
1098 - }
1092 + if (offset != (uint64_t)-1 && nvlist_add_uint64(fmri,
1093 + FM_FMRI_MEM_OFFSET, offset) != 0) {
1094 + atomic_inc_64(
1095 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1099 1096 }
1100 1097 }
1101 1098 }
1102 1099
1103 1100 void
1104 1101 fm_fmri_zfs_set(nvlist_t *fmri, int version, uint64_t pool_guid,
1105 1102 uint64_t vdev_guid)
1106 1103 {
1107 1104 if (version != ZFS_SCHEME_VERSION0) {
1108 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1105 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1109 1106 return;
1110 1107 }
1111 1108
1112 1109 if (nvlist_add_uint8(fmri, FM_VERSION, version) != 0) {
1113 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1110 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1114 1111 return;
1115 1112 }
1116 1113
1117 1114 if (nvlist_add_string(fmri, FM_FMRI_SCHEME, FM_FMRI_SCHEME_ZFS) != 0) {
1118 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1115 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1119 1116 return;
1120 1117 }
1121 1118
1122 1119 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_POOL, pool_guid) != 0) {
1123 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1120 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1124 1121 }
1125 1122
1126 1123 if (vdev_guid != 0) {
1127 1124 if (nvlist_add_uint64(fmri, FM_FMRI_ZFS_VDEV, vdev_guid) != 0) {
1128 - atomic_add_64(
1129 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1125 + atomic_inc_64(
1126 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1130 1127 }
1131 1128 }
1132 1129 }
1133 1130
1134 1131 uint64_t
1135 1132 fm_ena_increment(uint64_t ena)
1136 1133 {
1137 1134 uint64_t new_ena;
1138 1135
1139 1136 switch (ENA_FORMAT(ena)) {
1140 1137 case FM_ENA_FMT1:
1141 1138 new_ena = ena + (1 << ENA_FMT1_GEN_SHFT);
1142 1139 break;
1143 1140 case FM_ENA_FMT2:
1144 1141 new_ena = ena + (1 << ENA_FMT2_GEN_SHFT);
1145 1142 break;
1146 1143 default:
1147 1144 new_ena = 0;
1148 1145 }
1149 1146
1150 1147 return (new_ena);
1151 1148 }
1152 1149
1153 1150 uint64_t
1154 1151 fm_ena_generate_cpu(uint64_t timestamp, processorid_t cpuid, uchar_t format)
1155 1152 {
1156 1153 uint64_t ena = 0;
1157 1154
1158 1155 switch (format) {
1159 1156 case FM_ENA_FMT1:
1160 1157 if (timestamp) {
1161 1158 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1162 1159 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1163 1160 ENA_FMT1_CPUID_MASK) |
1164 1161 ((timestamp << ENA_FMT1_TIME_SHFT) &
1165 1162 ENA_FMT1_TIME_MASK));
1166 1163 } else {
1167 1164 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1168 1165 ((cpuid << ENA_FMT1_CPUID_SHFT) &
1169 1166 ENA_FMT1_CPUID_MASK) |
1170 1167 ((gethrtime_waitfree() << ENA_FMT1_TIME_SHFT) &
1171 1168 ENA_FMT1_TIME_MASK));
1172 1169 }
1173 1170 break;
1174 1171 case FM_ENA_FMT2:
1175 1172 ena = (uint64_t)((format & ENA_FORMAT_MASK) |
1176 1173 ((timestamp << ENA_FMT2_TIME_SHFT) & ENA_FMT2_TIME_MASK));
1177 1174 break;
1178 1175 default:
1179 1176 break;
1180 1177 }
1181 1178
1182 1179 return (ena);
1183 1180 }
1184 1181
1185 1182 uint64_t
1186 1183 fm_ena_generate(uint64_t timestamp, uchar_t format)
1187 1184 {
1188 1185 return (fm_ena_generate_cpu(timestamp, CPU->cpu_id, format));
1189 1186 }
1190 1187
1191 1188 uint64_t
1192 1189 fm_ena_generation_get(uint64_t ena)
1193 1190 {
1194 1191 uint64_t gen;
1195 1192
1196 1193 switch (ENA_FORMAT(ena)) {
1197 1194 case FM_ENA_FMT1:
1198 1195 gen = (ena & ENA_FMT1_GEN_MASK) >> ENA_FMT1_GEN_SHFT;
1199 1196 break;
1200 1197 case FM_ENA_FMT2:
1201 1198 gen = (ena & ENA_FMT2_GEN_MASK) >> ENA_FMT2_GEN_SHFT;
1202 1199 break;
1203 1200 default:
1204 1201 gen = 0;
1205 1202 break;
1206 1203 }
1207 1204
1208 1205 return (gen);
1209 1206 }
1210 1207
1211 1208 uchar_t
1212 1209 fm_ena_format_get(uint64_t ena)
1213 1210 {
1214 1211
1215 1212 return (ENA_FORMAT(ena));
1216 1213 }
1217 1214
1218 1215 uint64_t
1219 1216 fm_ena_id_get(uint64_t ena)
1220 1217 {
1221 1218 uint64_t id;
1222 1219
1223 1220 switch (ENA_FORMAT(ena)) {
1224 1221 case FM_ENA_FMT1:
1225 1222 id = (ena & ENA_FMT1_ID_MASK) >> ENA_FMT1_ID_SHFT;
1226 1223 break;
1227 1224 case FM_ENA_FMT2:
1228 1225 id = (ena & ENA_FMT2_ID_MASK) >> ENA_FMT2_ID_SHFT;
1229 1226 break;
1230 1227 default:
1231 1228 id = 0;
1232 1229 }
1233 1230
1234 1231 return (id);
1235 1232 }
1236 1233
1237 1234 uint64_t
1238 1235 fm_ena_time_get(uint64_t ena)
1239 1236 {
1240 1237 uint64_t time;
1241 1238
1242 1239 switch (ENA_FORMAT(ena)) {
1243 1240 case FM_ENA_FMT1:
1244 1241 time = (ena & ENA_FMT1_TIME_MASK) >> ENA_FMT1_TIME_SHFT;
1245 1242 break;
1246 1243 case FM_ENA_FMT2:
1247 1244 time = (ena & ENA_FMT2_TIME_MASK) >> ENA_FMT2_TIME_SHFT;
1248 1245 break;
1249 1246 default:
1250 1247 time = 0;
1251 1248 }
1252 1249
1253 1250 return (time);
1254 1251 }
1255 1252
1256 1253 /*
1257 1254 * Convert a getpcstack() trace to symbolic name+offset, and add the resulting
1258 1255 * string array to a Fault Management ereport as FM_EREPORT_PAYLOAD_NAME_STACK.
1259 1256 */
1260 1257 void
1261 1258 fm_payload_stack_add(nvlist_t *payload, const pc_t *stack, int depth)
1262 1259 {
1263 1260 int i;
1264 1261 char *sym;
1265 1262 ulong_t off;
1266 1263 char *stkpp[FM_STK_DEPTH];
1267 1264 char buf[FM_STK_DEPTH * FM_SYM_SZ];
1268 1265 char *stkp = buf;
1269 1266
1270 1267 for (i = 0; i < depth && i != FM_STK_DEPTH; i++, stkp += FM_SYM_SZ) {
1271 1268 if ((sym = kobj_getsymname(stack[i], &off)) != NULL)
1272 1269 (void) snprintf(stkp, FM_SYM_SZ, "%s+%lx", sym, off);
1273 1270 else
1274 1271 (void) snprintf(stkp, FM_SYM_SZ, "%lx", (long)stack[i]);
1275 1272 stkpp[i] = stkp;
1276 1273 }
1277 1274
1278 1275 fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_STACK,
1279 1276 DATA_TYPE_STRING_ARRAY, depth, stkpp, NULL);
1280 1277 }
1281 1278
1282 1279 void
1283 1280 print_msg_hwerr(ctid_t ct_id, proc_t *p)
1284 1281 {
1285 1282 uprintf("Killed process %d (%s) in contract id %d "
1286 1283 "due to hardware error\n", p->p_pid, p->p_user.u_comm, ct_id);
1287 1284 }
1288 1285
1289 1286 void
1290 1287 fm_fmri_hc_create(nvlist_t *fmri, int version, const nvlist_t *auth,
1291 1288 nvlist_t *snvl, nvlist_t *bboard, int npairs, ...)
1292 1289 {
1293 1290 nv_alloc_t *nva = nvlist_lookup_nv_alloc(fmri);
1294 1291 nvlist_t *pairs[HC_MAXPAIRS];
1295 1292 nvlist_t **hcl;
1296 1293 uint_t n;
1297 1294 int i, j;
1298 1295 va_list ap;
↓ open down ↓ |
159 lines elided |
↑ open up ↑ |
1299 1296 char *hcname, *hcid;
1300 1297
1301 1298 if (!fm_fmri_hc_set_common(fmri, version, auth))
1302 1299 return;
1303 1300
1304 1301 /*
1305 1302 * copy the bboard nvpairs to the pairs array
1306 1303 */
1307 1304 if (nvlist_lookup_nvlist_array(bboard, FM_FMRI_HC_LIST, &hcl, &n)
1308 1305 != 0) {
1309 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1306 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1310 1307 return;
1311 1308 }
1312 1309
1313 1310 for (i = 0; i < n; i++) {
1314 1311 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_NAME,
1315 1312 &hcname) != 0) {
1316 - atomic_add_64(
1317 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1313 + atomic_inc_64(
1314 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1318 1315 return;
1319 1316 }
1320 1317 if (nvlist_lookup_string(hcl[i], FM_FMRI_HC_ID, &hcid) != 0) {
1321 - atomic_add_64(
1322 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1318 + atomic_inc_64(
1319 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1323 1320 return;
1324 1321 }
1325 1322
1326 1323 pairs[i] = fm_nvlist_create(nva);
1327 1324 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, hcname) != 0 ||
1328 1325 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, hcid) != 0) {
1329 1326 for (j = 0; j <= i; j++) {
1330 1327 if (pairs[j] != NULL)
1331 1328 fm_nvlist_destroy(pairs[j],
1332 1329 FM_NVA_RETAIN);
1333 1330 }
1334 - atomic_add_64(
1335 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1331 + atomic_inc_64(
1332 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1336 1333 return;
1337 1334 }
1338 1335 }
1339 1336
1340 1337 /*
1341 1338 * create the pairs from passed in pairs
1342 1339 */
1343 1340 npairs = MIN(npairs, HC_MAXPAIRS);
1344 1341
1345 1342 va_start(ap, npairs);
1346 1343 for (i = n; i < npairs + n; i++) {
1347 1344 const char *name = va_arg(ap, const char *);
1348 1345 uint32_t id = va_arg(ap, uint32_t);
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
1349 1346 char idstr[11];
1350 1347 (void) snprintf(idstr, sizeof (idstr), "%u", id);
1351 1348 pairs[i] = fm_nvlist_create(nva);
1352 1349 if (nvlist_add_string(pairs[i], FM_FMRI_HC_NAME, name) != 0 ||
1353 1350 nvlist_add_string(pairs[i], FM_FMRI_HC_ID, idstr) != 0) {
1354 1351 for (j = 0; j <= i; j++) {
1355 1352 if (pairs[j] != NULL)
1356 1353 fm_nvlist_destroy(pairs[j],
1357 1354 FM_NVA_RETAIN);
1358 1355 }
1359 - atomic_add_64(
1360 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1356 + atomic_inc_64(
1357 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1361 1358 return;
1362 1359 }
1363 1360 }
1364 1361 va_end(ap);
1365 1362
1366 1363 /*
1367 1364 * Create the fmri hc list
1368 1365 */
1369 1366 if (nvlist_add_nvlist_array(fmri, FM_FMRI_HC_LIST, pairs,
1370 1367 npairs + n) != 0) {
1371 - atomic_add_64(&erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1368 + atomic_inc_64(&erpt_kstat_data.fmri_set_failed.value.ui64);
1372 1369 return;
1373 1370 }
1374 1371
1375 1372 for (i = 0; i < npairs + n; i++) {
1376 1373 fm_nvlist_destroy(pairs[i], FM_NVA_RETAIN);
1377 1374 }
1378 1375
1379 1376 if (snvl != NULL) {
1380 1377 if (nvlist_add_nvlist(fmri, FM_FMRI_HC_SPECIFIC, snvl) != 0) {
1381 - atomic_add_64(
1382 - &erpt_kstat_data.fmri_set_failed.value.ui64, 1);
1378 + atomic_inc_64(
1379 + &erpt_kstat_data.fmri_set_failed.value.ui64);
1383 1380 return;
1384 1381 }
1385 1382 }
1386 1383 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX