Print this page
6659 nvlist_free(NULL) is a no-op
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libzfs/common/libzfs_pool.c
+++ new/usr/src/lib/libzfs/common/libzfs_pool.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 24 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
25 25 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
26 26 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
27 27 */
28 28
29 29 #include <ctype.h>
30 30 #include <errno.h>
31 31 #include <devid.h>
32 32 #include <fcntl.h>
33 33 #include <libintl.h>
34 34 #include <stdio.h>
35 35 #include <stdlib.h>
36 36 #include <strings.h>
37 37 #include <unistd.h>
38 38 #include <libgen.h>
39 39 #include <sys/efi_partition.h>
40 40 #include <sys/vtoc.h>
41 41 #include <sys/zfs_ioctl.h>
42 42 #include <dlfcn.h>
43 43
44 44 #include "zfs_namecheck.h"
45 45 #include "zfs_prop.h"
46 46 #include "libzfs_impl.h"
47 47 #include "zfs_comutil.h"
48 48 #include "zfeature_common.h"
49 49
50 50 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
51 51
52 52 #define DISK_ROOT "/dev/dsk"
53 53 #define RDISK_ROOT "/dev/rdsk"
54 54 #define BACKUP_SLICE "s2"
55 55
/*
 * Context flags passed to zpool_valid_proplist(): certain pool properties
 * (e.g. altroot, readonly, bootfs) are only settable — or only rejected —
 * at pool creation or import time.
 */
typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;
60 60
61 61 /*
62 62 * ====================================================================
63 63 * zpool property functions
64 64 * ====================================================================
65 65 */
66 66
67 67 static int
68 68 zpool_get_all_props(zpool_handle_t *zhp)
69 69 {
70 70 zfs_cmd_t zc = { 0 };
71 71 libzfs_handle_t *hdl = zhp->zpool_hdl;
72 72
73 73 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
74 74
75 75 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
76 76 return (-1);
77 77
78 78 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
79 79 if (errno == ENOMEM) {
80 80 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
81 81 zcmd_free_nvlists(&zc);
82 82 return (-1);
83 83 }
84 84 } else {
85 85 zcmd_free_nvlists(&zc);
86 86 return (-1);
87 87 }
88 88 }
89 89
90 90 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
91 91 zcmd_free_nvlists(&zc);
92 92 return (-1);
93 93 }
94 94
95 95 zcmd_free_nvlists(&zc);
96 96
97 97 return (0);
98 98 }
99 99
100 100 static int
101 101 zpool_props_refresh(zpool_handle_t *zhp)
102 102 {
103 103 nvlist_t *old_props;
104 104
105 105 old_props = zhp->zpool_props;
106 106
107 107 if (zpool_get_all_props(zhp) != 0)
108 108 return (-1);
109 109
110 110 nvlist_free(old_props);
111 111 return (0);
112 112 }
113 113
114 114 static char *
115 115 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
116 116 zprop_source_t *src)
117 117 {
118 118 nvlist_t *nv, *nvl;
119 119 uint64_t ival;
120 120 char *value;
121 121 zprop_source_t source;
122 122
123 123 nvl = zhp->zpool_props;
124 124 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
125 125 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
126 126 source = ival;
127 127 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
128 128 } else {
129 129 source = ZPROP_SRC_DEFAULT;
130 130 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
131 131 value = "-";
132 132 }
133 133
134 134 if (src)
135 135 *src = source;
136 136
137 137 return (value);
138 138 }
139 139
140 140 uint64_t
141 141 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
142 142 {
143 143 nvlist_t *nv, *nvl;
144 144 uint64_t value;
145 145 zprop_source_t source;
146 146
147 147 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
148 148 /*
149 149 * zpool_get_all_props() has most likely failed because
150 150 * the pool is faulted, but if all we need is the top level
151 151 * vdev's guid then get it from the zhp config nvlist.
152 152 */
153 153 if ((prop == ZPOOL_PROP_GUID) &&
154 154 (nvlist_lookup_nvlist(zhp->zpool_config,
155 155 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
156 156 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
157 157 == 0)) {
158 158 return (value);
159 159 }
160 160 return (zpool_prop_default_numeric(prop));
161 161 }
162 162
163 163 nvl = zhp->zpool_props;
164 164 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
165 165 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
166 166 source = value;
167 167 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
168 168 } else {
169 169 source = ZPROP_SRC_DEFAULT;
170 170 value = zpool_prop_default_numeric(prop);
171 171 }
172 172
173 173 if (src)
174 174 *src = source;
175 175
176 176 return (value);
177 177 }
178 178
179 179 /*
180 180 * Map VDEV STATE to printed strings.
181 181 */
182 182 char *
183 183 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
184 184 {
185 185 switch (state) {
186 186 case VDEV_STATE_CLOSED:
187 187 case VDEV_STATE_OFFLINE:
188 188 return (gettext("OFFLINE"));
189 189 case VDEV_STATE_REMOVED:
190 190 return (gettext("REMOVED"));
191 191 case VDEV_STATE_CANT_OPEN:
192 192 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
193 193 return (gettext("FAULTED"));
194 194 else if (aux == VDEV_AUX_SPLIT_POOL)
195 195 return (gettext("SPLIT"));
196 196 else
197 197 return (gettext("UNAVAIL"));
198 198 case VDEV_STATE_FAULTED:
199 199 return (gettext("FAULTED"));
200 200 case VDEV_STATE_DEGRADED:
201 201 return (gettext("DEGRADED"));
202 202 case VDEV_STATE_HEALTHY:
203 203 return (gettext("ONLINE"));
204 204 }
205 205
206 206 return (gettext("UNKNOWN"));
207 207 }
208 208
209 209 /*
210 210 * Get a zpool property value for 'prop' and return the value in
211 211 * a pre-allocated buffer.
212 212 */
213 213 int
214 214 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
215 215 zprop_source_t *srctype, boolean_t literal)
216 216 {
217 217 uint64_t intval;
218 218 const char *strval;
219 219 zprop_source_t src = ZPROP_SRC_NONE;
220 220 nvlist_t *nvroot;
221 221 vdev_stat_t *vs;
222 222 uint_t vsc;
223 223
224 224 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
225 225 switch (prop) {
226 226 case ZPOOL_PROP_NAME:
227 227 (void) strlcpy(buf, zpool_get_name(zhp), len);
228 228 break;
229 229
230 230 case ZPOOL_PROP_HEALTH:
231 231 (void) strlcpy(buf, "FAULTED", len);
232 232 break;
233 233
234 234 case ZPOOL_PROP_GUID:
235 235 intval = zpool_get_prop_int(zhp, prop, &src);
236 236 (void) snprintf(buf, len, "%llu", intval);
237 237 break;
238 238
239 239 case ZPOOL_PROP_ALTROOT:
240 240 case ZPOOL_PROP_CACHEFILE:
241 241 case ZPOOL_PROP_COMMENT:
242 242 if (zhp->zpool_props != NULL ||
243 243 zpool_get_all_props(zhp) == 0) {
244 244 (void) strlcpy(buf,
245 245 zpool_get_prop_string(zhp, prop, &src),
246 246 len);
247 247 break;
248 248 }
249 249 /* FALLTHROUGH */
250 250 default:
251 251 (void) strlcpy(buf, "-", len);
252 252 break;
253 253 }
254 254
255 255 if (srctype != NULL)
256 256 *srctype = src;
257 257 return (0);
258 258 }
259 259
260 260 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
261 261 prop != ZPOOL_PROP_NAME)
262 262 return (-1);
263 263
264 264 switch (zpool_prop_get_type(prop)) {
265 265 case PROP_TYPE_STRING:
266 266 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
267 267 len);
268 268 break;
269 269
270 270 case PROP_TYPE_NUMBER:
271 271 intval = zpool_get_prop_int(zhp, prop, &src);
272 272
273 273 switch (prop) {
274 274 case ZPOOL_PROP_SIZE:
275 275 case ZPOOL_PROP_ALLOCATED:
276 276 case ZPOOL_PROP_FREE:
277 277 case ZPOOL_PROP_FREEING:
278 278 case ZPOOL_PROP_LEAKED:
279 279 if (literal) {
280 280 (void) snprintf(buf, len, "%llu",
281 281 (u_longlong_t)intval);
282 282 } else {
283 283 (void) zfs_nicenum(intval, buf, len);
284 284 }
285 285 break;
286 286 case ZPOOL_PROP_EXPANDSZ:
287 287 if (intval == 0) {
288 288 (void) strlcpy(buf, "-", len);
289 289 } else if (literal) {
290 290 (void) snprintf(buf, len, "%llu",
291 291 (u_longlong_t)intval);
292 292 } else {
293 293 (void) zfs_nicenum(intval, buf, len);
294 294 }
295 295 break;
296 296 case ZPOOL_PROP_CAPACITY:
297 297 if (literal) {
298 298 (void) snprintf(buf, len, "%llu",
299 299 (u_longlong_t)intval);
300 300 } else {
301 301 (void) snprintf(buf, len, "%llu%%",
302 302 (u_longlong_t)intval);
303 303 }
304 304 break;
305 305 case ZPOOL_PROP_FRAGMENTATION:
306 306 if (intval == UINT64_MAX) {
307 307 (void) strlcpy(buf, "-", len);
308 308 } else {
309 309 (void) snprintf(buf, len, "%llu%%",
310 310 (u_longlong_t)intval);
311 311 }
312 312 break;
313 313 case ZPOOL_PROP_DEDUPRATIO:
314 314 (void) snprintf(buf, len, "%llu.%02llux",
315 315 (u_longlong_t)(intval / 100),
316 316 (u_longlong_t)(intval % 100));
317 317 break;
318 318 case ZPOOL_PROP_HEALTH:
319 319 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
320 320 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
321 321 verify(nvlist_lookup_uint64_array(nvroot,
322 322 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
323 323 == 0);
324 324
325 325 (void) strlcpy(buf, zpool_state_to_name(intval,
326 326 vs->vs_aux), len);
327 327 break;
328 328 case ZPOOL_PROP_VERSION:
329 329 if (intval >= SPA_VERSION_FEATURES) {
330 330 (void) snprintf(buf, len, "-");
331 331 break;
332 332 }
333 333 /* FALLTHROUGH */
334 334 default:
335 335 (void) snprintf(buf, len, "%llu", intval);
336 336 }
337 337 break;
338 338
339 339 case PROP_TYPE_INDEX:
340 340 intval = zpool_get_prop_int(zhp, prop, &src);
341 341 if (zpool_prop_index_to_string(prop, intval, &strval)
342 342 != 0)
343 343 return (-1);
344 344 (void) strlcpy(buf, strval, len);
345 345 break;
346 346
347 347 default:
348 348 abort();
349 349 }
350 350
351 351 if (srctype)
352 352 *srctype = src;
353 353
354 354 return (0);
355 355 }
356 356
357 357 /*
358 358 * Check if the bootfs name has the same pool name as it is set to.
359 359 * Assuming bootfs is a valid dataset name.
360 360 */
361 361 static boolean_t
362 362 bootfs_name_valid(const char *pool, char *bootfs)
363 363 {
364 364 int len = strlen(pool);
365 365
366 366 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
367 367 return (B_FALSE);
368 368
369 369 if (strncmp(pool, bootfs, len) == 0 &&
370 370 (bootfs[len] == '/' || bootfs[len] == '\0'))
371 371 return (B_TRUE);
372 372
373 373 return (B_FALSE);
374 374 }
375 375
376 376 boolean_t
377 377 zpool_is_bootable(zpool_handle_t *zhp)
378 378 {
379 379 char bootfs[ZPOOL_MAXNAMELEN];
380 380
381 381 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
382 382 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
383 383 sizeof (bootfs)) != 0);
384 384 }
385 385
386 386
387 387 /*
388 388 * Given an nvlist of zpool properties to be set, validate that they are
389 389 * correct, and parse any numeric properties (index, boolean, etc) if they are
390 390 * specified as strings.
391 391 */
392 392 static nvlist_t *
393 393 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
394 394 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
395 395 {
396 396 nvpair_t *elem;
397 397 nvlist_t *retprops;
398 398 zpool_prop_t prop;
399 399 char *strval;
400 400 uint64_t intval;
401 401 char *slash, *check;
402 402 struct stat64 statbuf;
403 403 zpool_handle_t *zhp;
404 404
405 405 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
406 406 (void) no_memory(hdl);
407 407 return (NULL);
408 408 }
409 409
410 410 elem = NULL;
411 411 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
412 412 const char *propname = nvpair_name(elem);
413 413
414 414 prop = zpool_name_to_prop(propname);
415 415 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
416 416 int err;
417 417 char *fname = strchr(propname, '@') + 1;
418 418
419 419 err = zfeature_lookup_name(fname, NULL);
420 420 if (err != 0) {
421 421 ASSERT3U(err, ==, ENOENT);
422 422 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
423 423 "invalid feature '%s'"), fname);
424 424 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
425 425 goto error;
426 426 }
427 427
428 428 if (nvpair_type(elem) != DATA_TYPE_STRING) {
429 429 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
430 430 "'%s' must be a string"), propname);
431 431 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
432 432 goto error;
433 433 }
434 434
435 435 (void) nvpair_value_string(elem, &strval);
436 436 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
437 437 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
438 438 "property '%s' can only be set to "
439 439 "'enabled'"), propname);
440 440 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
441 441 goto error;
442 442 }
443 443
444 444 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
445 445 (void) no_memory(hdl);
446 446 goto error;
447 447 }
448 448 continue;
449 449 }
450 450
451 451 /*
452 452 * Make sure this property is valid and applies to this type.
453 453 */
454 454 if (prop == ZPROP_INVAL) {
455 455 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
456 456 "invalid property '%s'"), propname);
457 457 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
458 458 goto error;
459 459 }
460 460
461 461 if (zpool_prop_readonly(prop)) {
462 462 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
463 463 "is readonly"), propname);
464 464 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
465 465 goto error;
466 466 }
467 467
468 468 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
469 469 &strval, &intval, errbuf) != 0)
470 470 goto error;
471 471
472 472 /*
473 473 * Perform additional checking for specific properties.
474 474 */
475 475 switch (prop) {
476 476 case ZPOOL_PROP_VERSION:
477 477 if (intval < version ||
478 478 !SPA_VERSION_IS_SUPPORTED(intval)) {
479 479 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
480 480 "property '%s' number %d is invalid."),
481 481 propname, intval);
482 482 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
483 483 goto error;
484 484 }
485 485 break;
486 486
487 487 case ZPOOL_PROP_BOOTFS:
488 488 if (flags.create || flags.import) {
489 489 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
490 490 "property '%s' cannot be set at creation "
491 491 "or import time"), propname);
492 492 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
493 493 goto error;
494 494 }
495 495
496 496 if (version < SPA_VERSION_BOOTFS) {
497 497 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
498 498 "pool must be upgraded to support "
499 499 "'%s' property"), propname);
500 500 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
501 501 goto error;
502 502 }
503 503
504 504 /*
505 505 * bootfs property value has to be a dataset name and
506 506 * the dataset has to be in the same pool as it sets to.
507 507 */
508 508 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
509 509 strval)) {
510 510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
511 511 "is an invalid name"), strval);
512 512 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
513 513 goto error;
514 514 }
515 515
516 516 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
517 517 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
518 518 "could not open pool '%s'"), poolname);
519 519 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
520 520 goto error;
521 521 }
522 522 zpool_close(zhp);
523 523 break;
524 524
525 525 case ZPOOL_PROP_ALTROOT:
526 526 if (!flags.create && !flags.import) {
527 527 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
528 528 "property '%s' can only be set during pool "
529 529 "creation or import"), propname);
530 530 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
531 531 goto error;
532 532 }
533 533
534 534 if (strval[0] != '/') {
535 535 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
536 536 "bad alternate root '%s'"), strval);
537 537 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
538 538 goto error;
539 539 }
540 540 break;
541 541
542 542 case ZPOOL_PROP_CACHEFILE:
543 543 if (strval[0] == '\0')
544 544 break;
545 545
546 546 if (strcmp(strval, "none") == 0)
547 547 break;
548 548
549 549 if (strval[0] != '/') {
550 550 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
551 551 "property '%s' must be empty, an "
552 552 "absolute path, or 'none'"), propname);
553 553 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
554 554 goto error;
555 555 }
556 556
557 557 slash = strrchr(strval, '/');
558 558
559 559 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
560 560 strcmp(slash, "/..") == 0) {
561 561 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
562 562 "'%s' is not a valid file"), strval);
563 563 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
564 564 goto error;
565 565 }
566 566
567 567 *slash = '\0';
568 568
569 569 if (strval[0] != '\0' &&
570 570 (stat64(strval, &statbuf) != 0 ||
571 571 !S_ISDIR(statbuf.st_mode))) {
572 572 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
573 573 "'%s' is not a valid directory"),
574 574 strval);
575 575 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
576 576 goto error;
577 577 }
578 578
579 579 *slash = '/';
580 580 break;
581 581
582 582 case ZPOOL_PROP_COMMENT:
583 583 for (check = strval; *check != '\0'; check++) {
584 584 if (!isprint(*check)) {
585 585 zfs_error_aux(hdl,
586 586 dgettext(TEXT_DOMAIN,
587 587 "comment may only have printable "
588 588 "characters"));
589 589 (void) zfs_error(hdl, EZFS_BADPROP,
590 590 errbuf);
591 591 goto error;
592 592 }
593 593 }
594 594 if (strlen(strval) > ZPROP_MAX_COMMENT) {
595 595 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
596 596 "comment must not exceed %d characters"),
597 597 ZPROP_MAX_COMMENT);
598 598 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
599 599 goto error;
600 600 }
601 601 break;
602 602 case ZPOOL_PROP_READONLY:
603 603 if (!flags.import) {
604 604 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
605 605 "property '%s' can only be set at "
606 606 "import time"), propname);
607 607 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
608 608 goto error;
609 609 }
610 610 break;
611 611 }
612 612 }
613 613
614 614 return (retprops);
615 615 error:
616 616 nvlist_free(retprops);
617 617 return (NULL);
618 618 }
619 619
620 620 /*
621 621 * Set zpool property : propname=propval.
622 622 */
623 623 int
624 624 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
625 625 {
626 626 zfs_cmd_t zc = { 0 };
627 627 int ret = -1;
628 628 char errbuf[1024];
629 629 nvlist_t *nvl = NULL;
630 630 nvlist_t *realprops;
631 631 uint64_t version;
632 632 prop_flags_t flags = { 0 };
633 633
634 634 (void) snprintf(errbuf, sizeof (errbuf),
635 635 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
636 636 zhp->zpool_name);
637 637
638 638 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
639 639 return (no_memory(zhp->zpool_hdl));
640 640
641 641 if (nvlist_add_string(nvl, propname, propval) != 0) {
642 642 nvlist_free(nvl);
643 643 return (no_memory(zhp->zpool_hdl));
644 644 }
645 645
646 646 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
647 647 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
648 648 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
649 649 nvlist_free(nvl);
650 650 return (-1);
651 651 }
652 652
653 653 nvlist_free(nvl);
654 654 nvl = realprops;
655 655
656 656 /*
657 657 * Execute the corresponding ioctl() to set this property.
658 658 */
659 659 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
660 660
661 661 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
662 662 nvlist_free(nvl);
663 663 return (-1);
664 664 }
665 665
666 666 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
667 667
668 668 zcmd_free_nvlists(&zc);
669 669 nvlist_free(nvl);
670 670
671 671 if (ret)
672 672 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
673 673 else
674 674 (void) zpool_props_refresh(zhp);
675 675
676 676 return (ret);
677 677 }
678 678
/*
 * Expand the property list '*plp' for this pool: fill in standard pool
 * properties, then (on the first expansion) one entry per known feature,
 * then one entry per unsupported feature found on this pool.  Column
 * widths are updated to fit this pool's values.  Returns 0 on success,
 * -1 on failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;	/* tail pointer used to append in order */
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	/* Walk to the current end of the list so new entries append. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	/* Append a "feature@..." entry for every known feature, once. */
	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		/* entry takes ownership of 'propname' */
		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen each non-fixed column to fit this pool's value. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
771 771
772 772 /*
773 773 * Get the state for the given feature on the given ZFS pool.
774 774 */
775 775 int
776 776 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
777 777 size_t len)
778 778 {
779 779 uint64_t refcount;
780 780 boolean_t found = B_FALSE;
781 781 nvlist_t *features = zpool_get_features(zhp);
782 782 boolean_t supported;
783 783 const char *feature = strchr(propname, '@') + 1;
784 784
785 785 supported = zpool_prop_feature(propname);
786 786 ASSERT(supported || zfs_prop_unsupported(propname));
787 787
788 788 /*
789 789 * Convert from feature name to feature guid. This conversion is
790 790 * unecessary for unsupported@... properties because they already
791 791 * use guids.
792 792 */
793 793 if (supported) {
794 794 int ret;
795 795 spa_feature_t fid;
796 796
797 797 ret = zfeature_lookup_name(feature, &fid);
798 798 if (ret != 0) {
799 799 (void) strlcpy(buf, "-", len);
800 800 return (ENOTSUP);
801 801 }
802 802 feature = spa_feature_table[fid].fi_guid;
803 803 }
804 804
805 805 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
806 806 found = B_TRUE;
807 807
808 808 if (supported) {
809 809 if (!found) {
810 810 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
811 811 } else {
812 812 if (refcount == 0)
813 813 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
814 814 else
815 815 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
816 816 }
817 817 } else {
818 818 if (found) {
819 819 if (refcount == 0) {
820 820 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
821 821 } else {
822 822 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
823 823 }
824 824 } else {
825 825 (void) strlcpy(buf, "-", len);
826 826 return (ENOTSUP);
827 827 }
828 828 }
829 829
830 830 return (0);
831 831 }
832 832
833 833 /*
834 834 * Don't start the slice at the default block of 34; many storage
835 835 * devices will use a stripe width of 128k, so start there instead.
836 836 */
837 837 #define NEW_START_BLOCK 256
838 838
839 839 /*
840 840 * Validate the given pool name, optionally putting an extended error message in
841 841 * 'buf'.
842 842 */
843 843 boolean_t
844 844 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
845 845 {
846 846 namecheck_err_t why;
847 847 char what;
848 848 int ret;
849 849
850 850 ret = pool_namecheck(pool, &why, &what);
851 851
852 852 /*
853 853 * The rules for reserved pool names were extended at a later point.
854 854 * But we need to support users with existing pools that may now be
855 855 * invalid. So we only check for this expanded set of names during a
856 856 * create (or import), and only in userland.
857 857 */
858 858 if (ret == 0 && !isopen &&
859 859 (strncmp(pool, "mirror", 6) == 0 ||
860 860 strncmp(pool, "raidz", 5) == 0 ||
861 861 strncmp(pool, "spare", 5) == 0 ||
862 862 strcmp(pool, "log") == 0)) {
863 863 if (hdl != NULL)
864 864 zfs_error_aux(hdl,
865 865 dgettext(TEXT_DOMAIN, "name is reserved"));
866 866 return (B_FALSE);
867 867 }
868 868
869 869
870 870 if (ret != 0) {
871 871 if (hdl != NULL) {
872 872 switch (why) {
873 873 case NAME_ERR_TOOLONG:
874 874 zfs_error_aux(hdl,
875 875 dgettext(TEXT_DOMAIN, "name is too long"));
876 876 break;
877 877
878 878 case NAME_ERR_INVALCHAR:
879 879 zfs_error_aux(hdl,
880 880 dgettext(TEXT_DOMAIN, "invalid character "
881 881 "'%c' in pool name"), what);
882 882 break;
883 883
884 884 case NAME_ERR_NOLETTER:
885 885 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
886 886 "name must begin with a letter"));
887 887 break;
888 888
889 889 case NAME_ERR_RESERVED:
890 890 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
891 891 "name is reserved"));
892 892 break;
893 893
894 894 case NAME_ERR_DISKLIKE:
895 895 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
896 896 "pool name is reserved"));
897 897 break;
898 898
899 899 case NAME_ERR_LEADING_SLASH:
900 900 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
901 901 "leading slash in name"));
902 902 break;
903 903
904 904 case NAME_ERR_EMPTY_COMPONENT:
905 905 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
906 906 "empty component in name"));
907 907 break;
908 908
909 909 case NAME_ERR_TRAILING_SLASH:
910 910 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
911 911 "trailing slash in name"));
912 912 break;
913 913
914 914 case NAME_ERR_MULTIPLE_AT:
915 915 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
916 916 "multiple '@' delimiters in name"));
917 917 break;
918 918
919 919 }
920 920 }
921 921 return (B_FALSE);
922 922 }
923 923
924 924 return (B_TRUE);
925 925 }
926 926
927 927 /*
928 928 * Open a handle to the given pool, even if the pool is currently in the FAULTED
929 929 * state.
930 930 */
931 931 zpool_handle_t *
932 932 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
933 933 {
934 934 zpool_handle_t *zhp;
935 935 boolean_t missing;
936 936
937 937 /*
938 938 * Make sure the pool name is valid.
939 939 */
940 940 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
941 941 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
942 942 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
943 943 pool);
944 944 return (NULL);
945 945 }
946 946
947 947 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
948 948 return (NULL);
949 949
950 950 zhp->zpool_hdl = hdl;
951 951 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
952 952
953 953 if (zpool_refresh_stats(zhp, &missing) != 0) {
954 954 zpool_close(zhp);
955 955 return (NULL);
956 956 }
957 957
958 958 if (missing) {
959 959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
960 960 (void) zfs_error_fmt(hdl, EZFS_NOENT,
961 961 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
962 962 zpool_close(zhp);
963 963 return (NULL);
964 964 }
965 965
966 966 return (zhp);
967 967 }
968 968
969 969 /*
970 970 * Like the above, but silent on error. Used when iterating over pools (because
971 971 * the configuration cache may be out of date).
972 972 */
973 973 int
974 974 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
975 975 {
976 976 zpool_handle_t *zhp;
977 977 boolean_t missing;
978 978
979 979 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
980 980 return (-1);
981 981
982 982 zhp->zpool_hdl = hdl;
983 983 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
984 984
985 985 if (zpool_refresh_stats(zhp, &missing) != 0) {
986 986 zpool_close(zhp);
987 987 return (-1);
988 988 }
989 989
990 990 if (missing) {
991 991 zpool_close(zhp);
992 992 *ret = NULL;
993 993 return (0);
994 994 }
995 995
996 996 *ret = zhp;
997 997 return (0);
998 998 }
999 999
1000 1000 /*
1001 1001 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1002 1002 * state.
1003 1003 */
1004 1004 zpool_handle_t *
1005 1005 zpool_open(libzfs_handle_t *hdl, const char *pool)
1006 1006 {
1007 1007 zpool_handle_t *zhp;
1008 1008
1009 1009 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1010 1010 return (NULL);
1011 1011
1012 1012 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1013 1013 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1014 1014 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1015 1015 zpool_close(zhp);
1016 1016 return (NULL);
1017 1017 }
↓ open down ↓ |
1017 lines elided |
↑ open up ↑ |
1018 1018
1019 1019 return (zhp);
1020 1020 }
1021 1021
1022 1022 /*
1023 1023 * Close the handle. Simply frees the memory associated with the handle.
1024 1024 */
1025 1025 void
1026 1026 zpool_close(zpool_handle_t *zhp)
1027 1027 {
1028 - if (zhp->zpool_config)
1029 - nvlist_free(zhp->zpool_config);
1030 - if (zhp->zpool_old_config)
1031 - nvlist_free(zhp->zpool_old_config);
1032 - if (zhp->zpool_props)
1033 - nvlist_free(zhp->zpool_props);
1028 + nvlist_free(zhp->zpool_config);
1029 + nvlist_free(zhp->zpool_old_config);
1030 + nvlist_free(zhp->zpool_props);
1034 1031 free(zhp);
1035 1032 }
1036 1033
/*
 * Return the name of the pool.  The string is owned by the handle and
 * remains valid until zpool_close().
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
1045 1042
1046 1043
/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE) as cached on the
 * handle by the last stats refresh.
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
1055 1052
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 *
 * Parameters:
 *	hdl	- library handle, used for error reporting.
 *	pool	- name of the pool to create.
 *	nvroot	- root of the vdev tree describing the pool layout.
 *	props	- optional pool properties (may be NULL).
 *	fsprops	- optional root filesystem properties (may be NULL).
 *
 * Returns 0 on success, -1 (or a zfs_error() code) on failure with the
 * error set on 'hdl'.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
		    fsprops, zoned, NULL, NULL, msg)) == NULL) {
			goto create_failed;
		}
		/*
		 * The root dataset properties are handed to the kernel as a
		 * nested nvlist (ZPOOL_ROOTFS_PROPS) inside the pool
		 * property nvlist, which is allocated here if none exists.
		 */
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ERANGE:
			/*
			 * This happens if the record size is smaller or larger
			 * than the allowed size range, or not a power of 2.
			 *
			 * NOTE: although zfs_valid_proplist is called earlier,
			 * this case may have slipped through since the
			 * pool does not exist yet and it is therefore
			 * impossible to read properties e.g. max blocksize
			 * from the pool.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "record size invalid"));
			return (zfs_error(hdl, EZFS_BADPROP, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * The success path also falls through to this label ('ret' is 0
	 * there); the cleanup below is common to both outcomes, and
	 * nvlist_free(NULL) is a no-op so no NULL checks are needed.
	 */
create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
1188 1185
/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 *
 * 'log_str' is passed to the kernel via zc_history.  Returns 0 on success,
 * -1 on failure with the error set on the library handle.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	/*
	 * For an active pool, open the root filesystem first so that its
	 * mountpoint can be cleaned up once the destroy succeeds.
	 */
	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
1232 1229
/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 *
 * Returns 0 on success, -1 on failure with the error set on the library
 * handle.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	/*
	 * Hot spares and cache devices require minimum on-disk versions;
	 * reject such requests up front if the pool is too old.
	 */
	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}
1337 1334
/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 *
 * 'force' and 'hardforce' are passed to the kernel via zc_cookie and zc_guid
 * respectively; 'log_str' goes through zc_history.  Returns 0 on success,
 * -1 on failure with the error set on the library handle.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			/* Active shared spare: require an explicit force. */
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}
1375 1372
1376 1373 int
1377 1374 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1378 1375 {
1379 1376 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1380 1377 }
1381 1378
1382 1379 int
1383 1380 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1384 1381 {
1385 1382 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1386 1383 }
1387 1384
1388 1385 static void
1389 1386 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1390 1387 nvlist_t *config)
1391 1388 {
1392 1389 nvlist_t *nv = NULL;
1393 1390 uint64_t rewindto;
1394 1391 int64_t loss = -1;
1395 1392 struct tm t;
1396 1393 char timestr[128];
1397 1394
1398 1395 if (!hdl->libzfs_printerr || config == NULL)
1399 1396 return;
1400 1397
1401 1398 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1402 1399 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1403 1400 return;
1404 1401 }
1405 1402
1406 1403 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1407 1404 return;
1408 1405 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1409 1406
1410 1407 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1411 1408 strftime(timestr, 128, 0, &t) != 0) {
1412 1409 if (dryrun) {
1413 1410 (void) printf(dgettext(TEXT_DOMAIN,
1414 1411 "Would be able to return %s "
1415 1412 "to its state as of %s.\n"),
1416 1413 name, timestr);
1417 1414 } else {
1418 1415 (void) printf(dgettext(TEXT_DOMAIN,
1419 1416 "Pool %s returned to its state as of %s.\n"),
1420 1417 name, timestr);
1421 1418 }
1422 1419 if (loss > 120) {
1423 1420 (void) printf(dgettext(TEXT_DOMAIN,
1424 1421 "%s approximately %lld "),
1425 1422 dryrun ? "Would discard" : "Discarded",
1426 1423 (loss + 30) / 60);
1427 1424 (void) printf(dgettext(TEXT_DOMAIN,
1428 1425 "minutes of transactions.\n"));
1429 1426 } else if (loss > 0) {
1430 1427 (void) printf(dgettext(TEXT_DOMAIN,
1431 1428 "%s approximately %lld "),
1432 1429 dryrun ? "Would discard" : "Discarded", loss);
1433 1430 (void) printf(dgettext(TEXT_DOMAIN,
1434 1431 "seconds of transactions.\n"));
1435 1432 }
1436 1433 }
1437 1434 }
1438 1435
1439 1436 void
1440 1437 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1441 1438 nvlist_t *config)
1442 1439 {
1443 1440 nvlist_t *nv = NULL;
1444 1441 int64_t loss = -1;
1445 1442 uint64_t edata = UINT64_MAX;
1446 1443 uint64_t rewindto;
1447 1444 struct tm t;
1448 1445 char timestr[128];
1449 1446
1450 1447 if (!hdl->libzfs_printerr)
1451 1448 return;
1452 1449
1453 1450 if (reason >= 0)
1454 1451 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1455 1452 else
1456 1453 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1457 1454
1458 1455 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1459 1456 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1460 1457 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1461 1458 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1462 1459 goto no_info;
1463 1460
1464 1461 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1465 1462 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1466 1463 &edata);
1467 1464
1468 1465 (void) printf(dgettext(TEXT_DOMAIN,
1469 1466 "Recovery is possible, but will result in some data loss.\n"));
1470 1467
1471 1468 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1472 1469 strftime(timestr, 128, 0, &t) != 0) {
1473 1470 (void) printf(dgettext(TEXT_DOMAIN,
1474 1471 "\tReturning the pool to its state as of %s\n"
1475 1472 "\tshould correct the problem. "),
1476 1473 timestr);
1477 1474 } else {
1478 1475 (void) printf(dgettext(TEXT_DOMAIN,
1479 1476 "\tReverting the pool to an earlier state "
1480 1477 "should correct the problem.\n\t"));
1481 1478 }
1482 1479
1483 1480 if (loss > 120) {
1484 1481 (void) printf(dgettext(TEXT_DOMAIN,
1485 1482 "Approximately %lld minutes of data\n"
1486 1483 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1487 1484 } else if (loss > 0) {
1488 1485 (void) printf(dgettext(TEXT_DOMAIN,
1489 1486 "Approximately %lld seconds of data\n"
1490 1487 "\tmust be discarded, irreversibly. "), loss);
1491 1488 }
1492 1489 if (edata != 0 && edata != UINT64_MAX) {
1493 1490 if (edata == 1) {
1494 1491 (void) printf(dgettext(TEXT_DOMAIN,
1495 1492 "After rewind, at least\n"
1496 1493 "\tone persistent user-data error will remain. "));
1497 1494 } else {
1498 1495 (void) printf(dgettext(TEXT_DOMAIN,
1499 1496 "After rewind, several\n"
1500 1497 "\tpersistent user-data errors will remain. "));
1501 1498 }
1502 1499 }
1503 1500 (void) printf(dgettext(TEXT_DOMAIN,
1504 1501 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1505 1502 reason >= 0 ? "clear" : "import", name);
1506 1503
1507 1504 (void) printf(dgettext(TEXT_DOMAIN,
1508 1505 "A scrub of the pool\n"
1509 1506 "\tis strongly recommended after recovery.\n"));
1510 1507 return;
1511 1508
1512 1509 no_info:
1513 1510 (void) printf(dgettext(TEXT_DOMAIN,
1514 1511 "Destroy and re-create the pool from\n\ta backup source.\n"));
1515 1512 }
1516 1513
/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new properties value to be set.
 *
 * If 'altroot' is non-NULL, the pool is imported with that alternate root
 * and with the cachefile property forced to "none".
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	/* nvlist_free(NULL) is a no-op, so no check is needed here. */
	nvlist_free(props);
	return (ret);
}
1555 1551
1556 1552 static void
1557 1553 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1558 1554 int indent)
1559 1555 {
1560 1556 nvlist_t **child;
1561 1557 uint_t c, children;
1562 1558 char *vname;
1563 1559 uint64_t is_log = 0;
1564 1560
1565 1561 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1566 1562 &is_log);
1567 1563
1568 1564 if (name != NULL)
1569 1565 (void) printf("\t%*s%s%s\n", indent, "", name,
1570 1566 is_log ? " [log]" : "");
1571 1567
1572 1568 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1573 1569 &child, &children) != 0)
1574 1570 return;
1575 1571
1576 1572 for (c = 0; c < children; c++) {
1577 1573 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1578 1574 print_vdev_tree(hdl, vname, child[c], indent + 2);
1579 1575 free(vname);
1580 1576 }
1581 1577 }
1582 1578
1583 1579 void
1584 1580 zpool_print_unsup_feat(nvlist_t *config)
1585 1581 {
1586 1582 nvlist_t *nvinfo, *unsup_feat;
1587 1583
1588 1584 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1589 1585 0);
1590 1586 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1591 1587 &unsup_feat) == 0);
1592 1588
1593 1589 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1594 1590 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1595 1591 char *desc;
1596 1592
1597 1593 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1598 1594 verify(nvpair_value_string(nvp, &desc) == 0);
1599 1595
1600 1596 if (strlen(desc) > 0)
1601 1597 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1602 1598 else
1603 1599 (void) printf("\t%s\n", nvpair_name(nvp));
1604 1600 }
1605 1601 }
1606 1602
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set.  The configuration should have come from
 * zpool_find_import().  The 'newname' parameters control whether the pool
 * is imported with a different name.
 *
 * Returns 0 on success, -1 on failure with the error set on 'hdl'.  When a
 * try-rewind policy is in effect and the import fails, a description of
 * what a rewind would achieve is printed instead of a plain error.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		/*
		 * 'props' is replaced here by the validated copy, which is
		 * freed as soon as it has been written into 'zc'.
		 */
		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	/*
	 * Retry with a progressively larger destination buffer for as long
	 * as the kernel reports ENOMEM.
	 */
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}
1797 1793
/*
 * Scan the pool.
 *
 * 'func' selects the operation (e.g. POOL_SCAN_SCRUB to start a scrub,
 * POOL_SCAN_NONE to cancel).  Returns 0 on success, or a zfs_error() /
 * zpool_standard_error() result on failure.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	/*
	 * ENOENT is treated as success for scan requests; only a cancel
	 * (POOL_SCAN_NONE) falls through to report "no scrub in progress"
	 * below.
	 */
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		/*
		 * NOTE(review): if asserts are compiled out (NDEBUG), an
		 * unexpected 'func' would leave 'msg' uninitialized when it
		 * is used below — worth confirming callers never do this.
		 */
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		/* Distinguish "already scrubbing" from "resilvering". */
		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
1845 1841
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 *
 * The argument is parenthesized to avoid macro-expansion surprises, and is
 * cast to unsigned char before being handed to isdigit() to avoid undefined
 * behavior with negative char values (CERT STR37-C).
 */
#define	CTD_CHECK(str)	\
	((str) != NULL && (str)[0] == 'c' && isdigit((unsigned char)(str)[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.  Returns nonzero if the (possibly path-qualified) string
 * looks like a c#t#d# device name.
 */
static int
ctd_check_path(char *str)
{
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}
1878 1874
1879 1875 /*
1880 1876 * Find a vdev that matches the search criteria specified. We use the
1881 1877 * the nvpair name to determine how we should look for the device.
1882 1878 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1883 1879 * spare; but FALSE if its an INUSE spare.
1884 1880 */
1885 1881 static nvlist_t *
1886 1882 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1887 1883 boolean_t *l2cache, boolean_t *log)
1888 1884 {
1889 1885 uint_t c, children;
1890 1886 nvlist_t **child;
1891 1887 nvlist_t *ret;
1892 1888 uint64_t is_log;
1893 1889 char *srchkey;
1894 1890 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1895 1891
1896 1892 /* Nothing to look for */
1897 1893 if (search == NULL || pair == NULL)
1898 1894 return (NULL);
1899 1895
1900 1896 /* Obtain the key we will use to search */
1901 1897 srchkey = nvpair_name(pair);
1902 1898
1903 1899 switch (nvpair_type(pair)) {
1904 1900 case DATA_TYPE_UINT64:
1905 1901 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1906 1902 uint64_t srchval, theguid;
1907 1903
1908 1904 verify(nvpair_value_uint64(pair, &srchval) == 0);
1909 1905 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1910 1906 &theguid) == 0);
1911 1907 if (theguid == srchval)
1912 1908 return (nv);
1913 1909 }
1914 1910 break;
1915 1911
1916 1912 case DATA_TYPE_STRING: {
1917 1913 char *srchval, *val;
1918 1914
1919 1915 verify(nvpair_value_string(pair, &srchval) == 0);
1920 1916 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1921 1917 break;
1922 1918
1923 1919 /*
1924 1920 * Search for the requested value. Special cases:
1925 1921 *
1926 1922 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1927 1923 * "s0" or "s0/old". The "s0" part is hidden from the user,
1928 1924 * but included in the string, so this matches around it.
1929 1925 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1930 1926 *
1931 1927 * Otherwise, all other searches are simple string compares.
1932 1928 */
1933 1929 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1934 1930 ctd_check_path(val)) {
1935 1931 uint64_t wholedisk = 0;
1936 1932
1937 1933 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1938 1934 &wholedisk);
1939 1935 if (wholedisk) {
1940 1936 int slen = strlen(srchval);
1941 1937 int vlen = strlen(val);
1942 1938
1943 1939 if (slen != vlen - 2)
1944 1940 break;
1945 1941
1946 1942 /*
1947 1943 * make_leaf_vdev() should only set
1948 1944 * wholedisk for ZPOOL_CONFIG_PATHs which
1949 1945 * will include "/dev/dsk/", giving plenty of
1950 1946 * room for the indices used next.
1951 1947 */
1952 1948 ASSERT(vlen >= 6);
1953 1949
1954 1950 /*
1955 1951 * strings identical except trailing "s0"
1956 1952 */
1957 1953 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1958 1954 strncmp(srchval, val, slen) == 0)
1959 1955 return (nv);
1960 1956
1961 1957 /*
1962 1958 * strings identical except trailing "s0/old"
1963 1959 */
1964 1960 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
1965 1961 strcmp(&srchval[slen - 4], "/old") == 0 &&
1966 1962 strncmp(srchval, val, slen - 4) == 0)
1967 1963 return (nv);
1968 1964
1969 1965 break;
1970 1966 }
1971 1967 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
1972 1968 char *type, *idx, *end, *p;
1973 1969 uint64_t id, vdev_id;
1974 1970
1975 1971 /*
1976 1972 * Determine our vdev type, keeping in mind
1977 1973 * that the srchval is composed of a type and
1978 1974 * vdev id pair (i.e. mirror-4).
1979 1975 */
1980 1976 if ((type = strdup(srchval)) == NULL)
1981 1977 return (NULL);
1982 1978
1983 1979 if ((p = strrchr(type, '-')) == NULL) {
1984 1980 free(type);
1985 1981 break;
1986 1982 }
1987 1983 idx = p + 1;
1988 1984 *p = '\0';
1989 1985
1990 1986 /*
1991 1987 * If the types don't match then keep looking.
1992 1988 */
1993 1989 if (strncmp(val, type, strlen(val)) != 0) {
1994 1990 free(type);
1995 1991 break;
1996 1992 }
1997 1993
1998 1994 verify(strncmp(type, VDEV_TYPE_RAIDZ,
1999 1995 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2000 1996 strncmp(type, VDEV_TYPE_MIRROR,
2001 1997 strlen(VDEV_TYPE_MIRROR)) == 0);
2002 1998 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2003 1999 &id) == 0);
2004 2000
2005 2001 errno = 0;
2006 2002 vdev_id = strtoull(idx, &end, 10);
2007 2003
2008 2004 free(type);
2009 2005 if (errno != 0)
2010 2006 return (NULL);
2011 2007
2012 2008 /*
2013 2009 * Now verify that we have the correct vdev id.
2014 2010 */
2015 2011 if (vdev_id == id)
2016 2012 return (nv);
2017 2013 }
2018 2014
2019 2015 /*
2020 2016 * Common case
2021 2017 */
2022 2018 if (strcmp(srchval, val) == 0)
2023 2019 return (nv);
2024 2020 break;
2025 2021 }
2026 2022
2027 2023 default:
2028 2024 break;
2029 2025 }
2030 2026
2031 2027 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2032 2028 &child, &children) != 0)
2033 2029 return (NULL);
2034 2030
2035 2031 for (c = 0; c < children; c++) {
2036 2032 if ((ret = vdev_to_nvlist_iter(child[c], search,
2037 2033 avail_spare, l2cache, NULL)) != NULL) {
2038 2034 /*
2039 2035 * The 'is_log' value is only set for the toplevel
2040 2036 * vdev, not the leaf vdevs. So we always lookup the
2041 2037 * log device from the root of the vdev tree (where
2042 2038 * 'log' is non-NULL).
2043 2039 */
2044 2040 if (log != NULL &&
2045 2041 nvlist_lookup_uint64(child[c],
2046 2042 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2047 2043 is_log) {
2048 2044 *log = B_TRUE;
2049 2045 }
2050 2046 return (ret);
2051 2047 }
2052 2048 }
2053 2049
2054 2050 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2055 2051 &child, &children) == 0) {
2056 2052 for (c = 0; c < children; c++) {
2057 2053 if ((ret = vdev_to_nvlist_iter(child[c], search,
2058 2054 avail_spare, l2cache, NULL)) != NULL) {
2059 2055 *avail_spare = B_TRUE;
2060 2056 return (ret);
2061 2057 }
2062 2058 }
2063 2059 }
2064 2060
2065 2061 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2066 2062 &child, &children) == 0) {
2067 2063 for (c = 0; c < children; c++) {
2068 2064 if ((ret = vdev_to_nvlist_iter(child[c], search,
2069 2065 avail_spare, l2cache, NULL)) != NULL) {
2070 2066 *l2cache = B_TRUE;
2071 2067 return (ret);
2072 2068 }
2073 2069 }
2074 2070 }
2075 2071
2076 2072 return (NULL);
2077 2073 }
2078 2074
2079 2075 /*
2080 2076 * Given a physical path (minus the "/devices" prefix), find the
2081 2077 * associated vdev.
2082 2078 */
2083 2079 nvlist_t *
2084 2080 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2085 2081 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2086 2082 {
2087 2083 nvlist_t *search, *nvroot, *ret;
2088 2084
2089 2085 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2090 2086 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2091 2087
2092 2088 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2093 2089 &nvroot) == 0);
2094 2090
2095 2091 *avail_spare = B_FALSE;
2096 2092 *l2cache = B_FALSE;
2097 2093 if (log != NULL)
2098 2094 *log = B_FALSE;
2099 2095 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2100 2096 nvlist_free(search);
2101 2097
2102 2098 return (ret);
2103 2099 }
2104 2100
2105 2101 /*
2106 2102 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2107 2103 */
2108 2104 boolean_t
2109 2105 zpool_vdev_is_interior(const char *name)
2110 2106 {
2111 2107 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2112 2108 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2113 2109 return (B_TRUE);
2114 2110 return (B_FALSE);
2115 2111 }
2116 2112
2117 2113 nvlist_t *
2118 2114 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2119 2115 boolean_t *l2cache, boolean_t *log)
2120 2116 {
2121 2117 char buf[MAXPATHLEN];
2122 2118 char *end;
2123 2119 nvlist_t *nvroot, *search, *ret;
2124 2120 uint64_t guid;
2125 2121
2126 2122 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2127 2123
2128 2124 guid = strtoull(path, &end, 10);
2129 2125 if (guid != 0 && *end == '\0') {
2130 2126 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2131 2127 } else if (zpool_vdev_is_interior(path)) {
2132 2128 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2133 2129 } else if (path[0] != '/') {
2134 2130 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
2135 2131 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2136 2132 } else {
2137 2133 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2138 2134 }
2139 2135
2140 2136 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2141 2137 &nvroot) == 0);
2142 2138
2143 2139 *avail_spare = B_FALSE;
2144 2140 *l2cache = B_FALSE;
2145 2141 if (log != NULL)
2146 2142 *log = B_FALSE;
2147 2143 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2148 2144 nvlist_free(search);
2149 2145
2150 2146 return (ret);
2151 2147 }
2152 2148
2153 2149 static int
2154 2150 vdev_online(nvlist_t *nv)
2155 2151 {
2156 2152 uint64_t ival;
2157 2153
2158 2154 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2159 2155 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2160 2156 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2161 2157 return (0);
2162 2158
2163 2159 return (1);
2164 2160 }
2165 2161
2166 2162 /*
2167 2163 * Helper function for zpool_get_physpaths().
2168 2164 */
2169 2165 static int
2170 2166 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2171 2167 size_t *bytes_written)
2172 2168 {
2173 2169 size_t bytes_left, pos, rsz;
2174 2170 char *tmppath;
2175 2171 const char *format;
2176 2172
2177 2173 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2178 2174 &tmppath) != 0)
2179 2175 return (EZFS_NODEVICE);
2180 2176
2181 2177 pos = *bytes_written;
2182 2178 bytes_left = physpath_size - pos;
2183 2179 format = (pos == 0) ? "%s" : " %s";
2184 2180
2185 2181 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2186 2182 *bytes_written += rsz;
2187 2183
2188 2184 if (rsz >= bytes_left) {
2189 2185 /* if physpath was not copied properly, clear it */
2190 2186 if (bytes_left != 0) {
2191 2187 physpath[pos] = 0;
2192 2188 }
2193 2189 return (EZFS_NOSPC);
2194 2190 }
2195 2191 return (0);
2196 2192 }
2197 2193
/*
 * Recursive helper for zpool_get_config_physpath(): walk the vdev tree
 * rooted at 'nv' and append the physical path of every online disk leaf to
 * 'physpath', tracking the running length in '*rsz'.  Only disk, mirror,
 * replacing and spare vdevs are considered; for a spare vdev only the
 * active spare is used.
 *
 * NOTE(review): the fall-through return is always EZFS_POOL_INVALARG,
 * even when paths were appended successfully; the caller ignores the
 * return value and judges success by '*rsz'.  Only EZFS_NOSPC is
 * propagated out of the recursion.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		/* Skip leaves that are offline, faulted, or removed. */
		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		/* Recurse; only a buffer-overflow error aborts the walk. */
		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
2248 2244
2249 2245 /*
2250 2246 * Get phys_path for a root pool config.
2251 2247 * Return 0 on success; non-zero on failure.
2252 2248 */
2253 2249 static int
2254 2250 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2255 2251 {
2256 2252 size_t rsz;
2257 2253 nvlist_t *vdev_root;
2258 2254 nvlist_t **child;
2259 2255 uint_t count;
2260 2256 char *type;
2261 2257
2262 2258 rsz = 0;
2263 2259
2264 2260 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2265 2261 &vdev_root) != 0)
2266 2262 return (EZFS_INVALCONFIG);
2267 2263
2268 2264 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2269 2265 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2270 2266 &child, &count) != 0)
2271 2267 return (EZFS_INVALCONFIG);
2272 2268
2273 2269 /*
2274 2270 * root pool can only have a single top-level vdev.
2275 2271 */
2276 2272 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
2277 2273 return (EZFS_POOL_INVALARG);
2278 2274
2279 2275 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2280 2276 B_FALSE);
2281 2277
2282 2278 /* No online devices */
2283 2279 if (rsz == 0)
2284 2280 return (EZFS_NODEVICE);
2285 2281
2286 2282 return (0);
2287 2283 }
2288 2284
2289 2285 /*
2290 2286 * Get phys_path for a root pool
2291 2287 * Return 0 on success; non-zero on failure.
2292 2288 */
2293 2289 int
2294 2290 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2295 2291 {
2296 2292 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2297 2293 phypath_size));
2298 2294 }
2299 2295
2300 2296 /*
2301 2297 * If the device has being dynamically expanded then we need to relabel
2302 2298 * the disk to use the new unallocated space.
2303 2299 */
2304 2300 static int
2305 2301 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2306 2302 {
2307 2303 char path[MAXPATHLEN];
2308 2304 char errbuf[1024];
2309 2305 int fd, error;
2310 2306 int (*_efi_use_whole_disk)(int);
2311 2307
2312 2308 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2313 2309 "efi_use_whole_disk")) == NULL)
2314 2310 return (-1);
2315 2311
2316 2312 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2317 2313
2318 2314 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2319 2315 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2320 2316 "relabel '%s': unable to open device"), name);
2321 2317 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2322 2318 }
2323 2319
2324 2320 /*
2325 2321 * It's possible that we might encounter an error if the device
2326 2322 * does not have any unallocated space left. If so, we simply
2327 2323 * ignore that error and continue on.
2328 2324 */
2329 2325 error = _efi_use_whole_disk(fd);
2330 2326 (void) close(fd);
2331 2327 if (error && error != VT_ENOSPC) {
2332 2328 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2333 2329 "relabel '%s': unable to read disk capacity"), name);
2334 2330 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2335 2331 }
2336 2332 return (0);
2337 2333 }
2338 2334
/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success the resulting vdev state is returned in
 * '*newstate'.  Returns 0 on success, or a zfs_error() code on failure.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	/* Pick the error prefix based on whether this is an expand. */
	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* An available (unused) spare cannot be onlined. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		/*
		 * For a whole disk, rewrite the EFI label so the vdev can
		 * grow into any newly-available space; relabel failure is
		 * deliberately ignored (best effort).  The path is assumed
		 * to start with DISK_ROOT "/".
		 */
		if (wholedisk) {
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		/*
		 * EINVAL here means the device now belongs to a pool that
		 * was split off from this one.
		 */
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel returns the resulting vdev state in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
2412 2408
2413 2409 /*
2414 2410 * Take the specified vdev offline
2415 2411 */
2416 2412 int
2417 2413 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2418 2414 {
2419 2415 zfs_cmd_t zc = { 0 };
2420 2416 char msg[1024];
2421 2417 nvlist_t *tgt;
2422 2418 boolean_t avail_spare, l2cache;
2423 2419 libzfs_handle_t *hdl = zhp->zpool_hdl;
2424 2420
2425 2421 (void) snprintf(msg, sizeof (msg),
2426 2422 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2427 2423
2428 2424 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2429 2425 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2430 2426 NULL)) == NULL)
2431 2427 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2432 2428
2433 2429 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2434 2430
2435 2431 if (avail_spare)
2436 2432 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2437 2433
2438 2434 zc.zc_cookie = VDEV_STATE_OFFLINE;
2439 2435 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2440 2436
2441 2437 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2442 2438 return (0);
2443 2439
2444 2440 switch (errno) {
2445 2441 case EBUSY:
2446 2442
2447 2443 /*
2448 2444 * There are no other replicas of this device.
2449 2445 */
2450 2446 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2451 2447
2452 2448 case EEXIST:
2453 2449 /*
2454 2450 * The log device has unplayed logs
2455 2451 */
2456 2452 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2457 2453
2458 2454 default:
2459 2455 return (zpool_standard_error(hdl, errno, msg));
2460 2456 }
2461 2457 }
2462 2458
2463 2459 /*
2464 2460 * Mark the given vdev faulted.
2465 2461 */
2466 2462 int
2467 2463 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2468 2464 {
2469 2465 zfs_cmd_t zc = { 0 };
2470 2466 char msg[1024];
2471 2467 libzfs_handle_t *hdl = zhp->zpool_hdl;
2472 2468
2473 2469 (void) snprintf(msg, sizeof (msg),
2474 2470 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2475 2471
2476 2472 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2477 2473 zc.zc_guid = guid;
2478 2474 zc.zc_cookie = VDEV_STATE_FAULTED;
2479 2475 zc.zc_obj = aux;
2480 2476
2481 2477 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2482 2478 return (0);
2483 2479
2484 2480 switch (errno) {
2485 2481 case EBUSY:
2486 2482
2487 2483 /*
2488 2484 * There are no other replicas of this device.
2489 2485 */
2490 2486 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2491 2487
2492 2488 default:
2493 2489 return (zpool_standard_error(hdl, errno, msg));
2494 2490 }
2495 2491
2496 2492 }
2497 2493
2498 2494 /*
2499 2495 * Mark the given vdev degraded.
2500 2496 */
2501 2497 int
2502 2498 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2503 2499 {
2504 2500 zfs_cmd_t zc = { 0 };
2505 2501 char msg[1024];
2506 2502 libzfs_handle_t *hdl = zhp->zpool_hdl;
2507 2503
2508 2504 (void) snprintf(msg, sizeof (msg),
2509 2505 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2510 2506
2511 2507 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2512 2508 zc.zc_guid = guid;
2513 2509 zc.zc_cookie = VDEV_STATE_DEGRADED;
2514 2510 zc.zc_obj = aux;
2515 2511
2516 2512 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2517 2513 return (0);
2518 2514
2519 2515 return (zpool_standard_error(hdl, errno, msg));
2520 2516 }
2521 2517
2522 2518 /*
2523 2519 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2524 2520 * a hot spare.
2525 2521 */
2526 2522 static boolean_t
2527 2523 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2528 2524 {
2529 2525 nvlist_t **child;
2530 2526 uint_t c, children;
2531 2527 char *type;
2532 2528
2533 2529 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2534 2530 &children) == 0) {
2535 2531 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2536 2532 &type) == 0);
2537 2533
2538 2534 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2539 2535 children == 2 && child[which] == tgt)
2540 2536 return (B_TRUE);
2541 2537
2542 2538 for (c = 0; c < children; c++)
2543 2539 if (is_replacing_spare(child[c], tgt, which))
2544 2540 return (B_TRUE);
2545 2541 }
2546 2542
2547 2543 return (B_FALSE);
2548 2544 }
2549 2545
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 * Returns 0 on success, -1 or a zfs_error() code on failure.
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/* NOTE(review): '== 0' compares a pointer; '== NULL' is clearer. */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Cannot attach to an unused spare or cache device. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	/* Translate the kernel errno into a user-facing explanation. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
2715 2711
2716 2712 /*
2717 2713 * Detach the specified device.
2718 2714 */
2719 2715 int
2720 2716 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2721 2717 {
2722 2718 zfs_cmd_t zc = { 0 };
2723 2719 char msg[1024];
2724 2720 nvlist_t *tgt;
2725 2721 boolean_t avail_spare, l2cache;
2726 2722 libzfs_handle_t *hdl = zhp->zpool_hdl;
2727 2723
2728 2724 (void) snprintf(msg, sizeof (msg),
2729 2725 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2730 2726
2731 2727 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2732 2728 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2733 2729 NULL)) == 0)
2734 2730 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2735 2731
2736 2732 if (avail_spare)
2737 2733 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2738 2734
2739 2735 if (l2cache)
2740 2736 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2741 2737
2742 2738 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2743 2739
2744 2740 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2745 2741 return (0);
2746 2742
2747 2743 switch (errno) {
2748 2744
2749 2745 case ENOTSUP:
2750 2746 /*
2751 2747 * Can't detach from this type of vdev.
2752 2748 */
2753 2749 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2754 2750 "applicable to mirror and replacing vdevs"));
2755 2751 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2756 2752 break;
2757 2753
2758 2754 case EBUSY:
2759 2755 /*
2760 2756 * There are no other replicas of this device.
2761 2757 */
2762 2758 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2763 2759 break;
2764 2760
2765 2761 default:
2766 2762 (void) zpool_standard_error(hdl, errno, msg);
2767 2763 }
2768 2764
2769 2765 return (-1);
2770 2766 }
2771 2767
2772 2768 /*
2773 2769 * Find a mirror vdev in the source nvlist.
2774 2770 *
2775 2771 * The mchild array contains a list of disks in one of the top-level mirrors
2776 2772 * of the source pool. The schild array contains a list of disks that the
2777 2773 * user specified on the command line. We loop over the mchild array to
2778 2774 * see if any entry in the schild array matches.
2779 2775 *
2780 2776 * If a disk in the mchild array is found in the schild array, we return
2781 2777 * the index of that entry. Otherwise we return -1.
2782 2778 */
2783 2779 static int
2784 2780 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2785 2781 nvlist_t **schild, uint_t schildren)
2786 2782 {
2787 2783 uint_t mc;
2788 2784
2789 2785 for (mc = 0; mc < mchildren; mc++) {
2790 2786 uint_t sc;
2791 2787 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2792 2788 mchild[mc], B_FALSE);
2793 2789
2794 2790 for (sc = 0; sc < schildren; sc++) {
2795 2791 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2796 2792 schild[sc], B_FALSE);
2797 2793 boolean_t result = (strcmp(mpath, spath) == 0);
2798 2794
2799 2795 free(spath);
2800 2796 if (result) {
2801 2797 free(mpath);
2802 2798 return (mc);
2803 2799 }
2804 2800 }
2805 2801
2806 2802 free(mpath);
2807 2803 }
2808 2804
2809 2805 return (-1);
2810 2806 }
2811 2807
2812 2808 /*
2813 2809 * Split a mirror pool. If newroot points to null, then a new nvlist
2814 2810 * is generated and it is the responsibility of the caller to free it.
2815 2811 */
2816 2812 int
2817 2813 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2818 2814 nvlist_t *props, splitflags_t flags)
2819 2815 {
2820 2816 zfs_cmd_t zc = { 0 };
2821 2817 char msg[1024];
2822 2818 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2823 2819 nvlist_t **varray = NULL, *zc_props = NULL;
2824 2820 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2825 2821 libzfs_handle_t *hdl = zhp->zpool_hdl;
2826 2822 uint64_t vers;
2827 2823 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2828 2824 int retval = 0;
2829 2825
2830 2826 (void) snprintf(msg, sizeof (msg),
2831 2827 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2832 2828
2833 2829 if (!zpool_name_valid(hdl, B_FALSE, newname))
2834 2830 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2835 2831
2836 2832 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2837 2833 (void) fprintf(stderr, gettext("Internal error: unable to "
2838 2834 "retrieve pool configuration\n"));
2839 2835 return (-1);
2840 2836 }
2841 2837
2842 2838 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2843 2839 == 0);
2844 2840 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2845 2841
2846 2842 if (props) {
↓ open down ↓ |
1284 lines elided |
↑ open up ↑ |
2847 2843 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2848 2844 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2849 2845 props, vers, flags, msg)) == NULL)
2850 2846 return (-1);
2851 2847 }
2852 2848
2853 2849 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2854 2850 &children) != 0) {
2855 2851 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2856 2852 "Source pool is missing vdev tree"));
2857 - if (zc_props)
2858 - nvlist_free(zc_props);
2853 + nvlist_free(zc_props);
2859 2854 return (-1);
2860 2855 }
2861 2856
2862 2857 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2863 2858 vcount = 0;
2864 2859
2865 2860 if (*newroot == NULL ||
2866 2861 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2867 2862 &newchild, &newchildren) != 0)
2868 2863 newchildren = 0;
2869 2864
2870 2865 for (c = 0; c < children; c++) {
2871 2866 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2872 2867 char *type;
2873 2868 nvlist_t **mchild, *vdev;
2874 2869 uint_t mchildren;
2875 2870 int entry;
2876 2871
2877 2872 /*
2878 2873 * Unlike cache & spares, slogs are stored in the
2879 2874 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2880 2875 */
2881 2876 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2882 2877 &is_log);
2883 2878 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2884 2879 &is_hole);
2885 2880 if (is_log || is_hole) {
2886 2881 /*
2887 2882 * Create a hole vdev and put it in the config.
2888 2883 */
2889 2884 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2890 2885 goto out;
2891 2886 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2892 2887 VDEV_TYPE_HOLE) != 0)
2893 2888 goto out;
2894 2889 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2895 2890 1) != 0)
2896 2891 goto out;
2897 2892 if (lastlog == 0)
2898 2893 lastlog = vcount;
2899 2894 varray[vcount++] = vdev;
2900 2895 continue;
2901 2896 }
2902 2897 lastlog = 0;
2903 2898 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2904 2899 == 0);
2905 2900 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2906 2901 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2907 2902 "Source pool must be composed only of mirrors\n"));
2908 2903 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2909 2904 goto out;
2910 2905 }
2911 2906
2912 2907 verify(nvlist_lookup_nvlist_array(child[c],
2913 2908 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2914 2909
2915 2910 /* find or add an entry for this top-level vdev */
2916 2911 if (newchildren > 0 &&
2917 2912 (entry = find_vdev_entry(zhp, mchild, mchildren,
2918 2913 newchild, newchildren)) >= 0) {
2919 2914 /* We found a disk that the user specified. */
2920 2915 vdev = mchild[entry];
2921 2916 ++found;
2922 2917 } else {
2923 2918 /* User didn't specify a disk for this vdev. */
2924 2919 vdev = mchild[mchildren - 1];
2925 2920 }
2926 2921
2927 2922 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2928 2923 goto out;
2929 2924 }
2930 2925
2931 2926 /* did we find every disk the user specified? */
2932 2927 if (found != newchildren) {
2933 2928 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2934 2929 "include at most one disk from each mirror"));
2935 2930 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2936 2931 goto out;
2937 2932 }
2938 2933
2939 2934 /* Prepare the nvlist for populating. */
2940 2935 if (*newroot == NULL) {
2941 2936 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2942 2937 goto out;
2943 2938 freelist = B_TRUE;
2944 2939 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2945 2940 VDEV_TYPE_ROOT) != 0)
2946 2941 goto out;
2947 2942 } else {
2948 2943 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2949 2944 }
2950 2945
2951 2946 /* Add all the children we found */
2952 2947 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2953 2948 lastlog == 0 ? vcount : lastlog) != 0)
2954 2949 goto out;
2955 2950
2956 2951 /*
2957 2952 * If we're just doing a dry run, exit now with success.
2958 2953 */
2959 2954 if (flags.dryrun) {
2960 2955 memory_err = B_FALSE;
2961 2956 freelist = B_FALSE;
2962 2957 goto out;
2963 2958 }
2964 2959
2965 2960 /* now build up the config list & call the ioctl */
2966 2961 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
2967 2962 goto out;
2968 2963
2969 2964 if (nvlist_add_nvlist(newconfig,
2970 2965 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
2971 2966 nvlist_add_string(newconfig,
2972 2967 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
2973 2968 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
2974 2969 goto out;
2975 2970
2976 2971 /*
2977 2972 * The new pool is automatically part of the namespace unless we
2978 2973 * explicitly export it.
2979 2974 */
2980 2975 if (!flags.import)
2981 2976 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
2982 2977 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2983 2978 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
2984 2979 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
2985 2980 goto out;
2986 2981 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
2987 2982 goto out;
2988 2983
2989 2984 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
2990 2985 retval = zpool_standard_error(hdl, errno, msg);
2991 2986 goto out;
2992 2987 }
2993 2988
2994 2989 freelist = B_FALSE;
2995 2990 memory_err = B_FALSE;
↓ open down ↓ |
127 lines elided |
↑ open up ↑ |
2996 2991
2997 2992 out:
2998 2993 if (varray != NULL) {
2999 2994 int v;
3000 2995
3001 2996 for (v = 0; v < vcount; v++)
3002 2997 nvlist_free(varray[v]);
3003 2998 free(varray);
3004 2999 }
3005 3000 zcmd_free_nvlists(&zc);
3006 - if (zc_props)
3007 - nvlist_free(zc_props);
3008 - if (newconfig)
3009 - nvlist_free(newconfig);
3001 + nvlist_free(zc_props);
3002 + nvlist_free(newconfig);
3010 3003 if (freelist) {
3011 3004 nvlist_free(*newroot);
3012 3005 *newroot = NULL;
3013 3006 }
3014 3007
3015 3008 if (retval != 0)
3016 3009 return (retval);
3017 3010
3018 3011 if (memory_err)
3019 3012 return (no_memory(hdl));
3020 3013
3021 3014 return (0);
3022 3015 }
3023 3016
3024 3017 /*
3025 3018 * Remove the given device. Currently, this is supported only for hot spares
3026 3019 * and level 2 cache devices.
3027 3020 */
3028 3021 int
3029 3022 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3030 3023 {
3031 3024 zfs_cmd_t zc = { 0 };
3032 3025 char msg[1024];
3033 3026 nvlist_t *tgt;
3034 3027 boolean_t avail_spare, l2cache, islog;
3035 3028 libzfs_handle_t *hdl = zhp->zpool_hdl;
3036 3029 uint64_t version;
3037 3030
3038 3031 (void) snprintf(msg, sizeof (msg),
3039 3032 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3040 3033
3041 3034 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3042 3035 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3043 3036 &islog)) == 0)
3044 3037 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3045 3038 /*
3046 3039 * XXX - this should just go away.
3047 3040 */
3048 3041 if (!avail_spare && !l2cache && !islog) {
3049 3042 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3050 3043 "only inactive hot spares, cache, top-level, "
3051 3044 "or log devices can be removed"));
3052 3045 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3053 3046 }
3054 3047
3055 3048 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3056 3049 if (islog && version < SPA_VERSION_HOLES) {
3057 3050 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3058 3051 "pool must be upgrade to support log removal"));
3059 3052 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3060 3053 }
3061 3054
3062 3055 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3063 3056
3064 3057 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3065 3058 return (0);
3066 3059
3067 3060 return (zpool_standard_error(hdl, errno, msg));
3068 3061 }
3069 3062
3070 3063 /*
3071 3064 * Clear the errors for the pool, or the particular device if specified.
3072 3065 */
3073 3066 int
3074 3067 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3075 3068 {
3076 3069 zfs_cmd_t zc = { 0 };
3077 3070 char msg[1024];
3078 3071 nvlist_t *tgt;
3079 3072 zpool_rewind_policy_t policy;
3080 3073 boolean_t avail_spare, l2cache;
3081 3074 libzfs_handle_t *hdl = zhp->zpool_hdl;
3082 3075 nvlist_t *nvi = NULL;
3083 3076 int error;
3084 3077
3085 3078 if (path)
3086 3079 (void) snprintf(msg, sizeof (msg),
3087 3080 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3088 3081 path);
3089 3082 else
3090 3083 (void) snprintf(msg, sizeof (msg),
3091 3084 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3092 3085 zhp->zpool_name);
3093 3086
3094 3087 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3095 3088 if (path) {
3096 3089 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3097 3090 &l2cache, NULL)) == 0)
3098 3091 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3099 3092
3100 3093 /*
3101 3094 * Don't allow error clearing for hot spares. Do allow
3102 3095 * error clearing for l2cache devices.
3103 3096 */
3104 3097 if (avail_spare)
3105 3098 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3106 3099
3107 3100 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3108 3101 &zc.zc_guid) == 0);
3109 3102 }
3110 3103
3111 3104 zpool_get_rewind_policy(rewindnvl, &policy);
3112 3105 zc.zc_cookie = policy.zrp_request;
3113 3106
3114 3107 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3115 3108 return (-1);
3116 3109
3117 3110 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3118 3111 return (-1);
3119 3112
3120 3113 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3121 3114 errno == ENOMEM) {
3122 3115 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3123 3116 zcmd_free_nvlists(&zc);
3124 3117 return (-1);
3125 3118 }
3126 3119 }
3127 3120
3128 3121 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3129 3122 errno != EPERM && errno != EACCES)) {
3130 3123 if (policy.zrp_request &
3131 3124 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3132 3125 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3133 3126 zpool_rewind_exclaim(hdl, zc.zc_name,
3134 3127 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3135 3128 nvi);
3136 3129 nvlist_free(nvi);
3137 3130 }
3138 3131 zcmd_free_nvlists(&zc);
3139 3132 return (0);
3140 3133 }
3141 3134
3142 3135 zcmd_free_nvlists(&zc);
3143 3136 return (zpool_standard_error(hdl, errno, msg));
3144 3137 }
3145 3138
3146 3139 /*
3147 3140 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3148 3141 */
3149 3142 int
3150 3143 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3151 3144 {
3152 3145 zfs_cmd_t zc = { 0 };
3153 3146 char msg[1024];
3154 3147 libzfs_handle_t *hdl = zhp->zpool_hdl;
3155 3148
3156 3149 (void) snprintf(msg, sizeof (msg),
3157 3150 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3158 3151 guid);
3159 3152
3160 3153 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3161 3154 zc.zc_guid = guid;
3162 3155 zc.zc_cookie = ZPOOL_NO_REWIND;
3163 3156
3164 3157 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3165 3158 return (0);
3166 3159
3167 3160 return (zpool_standard_error(hdl, errno, msg));
3168 3161 }
3169 3162
3170 3163 /*
3171 3164 * Change the GUID for a pool.
3172 3165 */
3173 3166 int
3174 3167 zpool_reguid(zpool_handle_t *zhp)
3175 3168 {
3176 3169 char msg[1024];
3177 3170 libzfs_handle_t *hdl = zhp->zpool_hdl;
3178 3171 zfs_cmd_t zc = { 0 };
3179 3172
3180 3173 (void) snprintf(msg, sizeof (msg),
3181 3174 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3182 3175
3183 3176 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3184 3177 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3185 3178 return (0);
3186 3179
3187 3180 return (zpool_standard_error(hdl, errno, msg));
3188 3181 }
3189 3182
3190 3183 /*
3191 3184 * Reopen the pool.
3192 3185 */
3193 3186 int
3194 3187 zpool_reopen(zpool_handle_t *zhp)
3195 3188 {
3196 3189 zfs_cmd_t zc = { 0 };
3197 3190 char msg[1024];
3198 3191 libzfs_handle_t *hdl = zhp->zpool_hdl;
3199 3192
3200 3193 (void) snprintf(msg, sizeof (msg),
3201 3194 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3202 3195 zhp->zpool_name);
3203 3196
3204 3197 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3205 3198 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3206 3199 return (0);
3207 3200 return (zpool_standard_error(hdl, errno, msg));
3208 3201 }
3209 3202
3210 3203 /*
3211 3204 * Convert from a devid string to a path.
3212 3205 */
3213 3206 static char *
3214 3207 devid_to_path(char *devid_str)
3215 3208 {
3216 3209 ddi_devid_t devid;
3217 3210 char *minor;
3218 3211 char *path;
3219 3212 devid_nmlist_t *list = NULL;
3220 3213 int ret;
3221 3214
3222 3215 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3223 3216 return (NULL);
3224 3217
3225 3218 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3226 3219
3227 3220 devid_str_free(minor);
3228 3221 devid_free(devid);
3229 3222
3230 3223 if (ret != 0)
3231 3224 return (NULL);
3232 3225
3233 3226 /*
3234 3227 * In a case the strdup() fails, we will just return NULL below.
3235 3228 */
3236 3229 path = strdup(list[0].devname);
3237 3230
3238 3231 devid_free_nmlist(list);
3239 3232
3240 3233 return (path);
3241 3234 }
3242 3235
3243 3236 /*
3244 3237 * Convert from a path to a devid string.
3245 3238 */
3246 3239 static char *
3247 3240 path_to_devid(const char *path)
3248 3241 {
3249 3242 int fd;
3250 3243 ddi_devid_t devid;
3251 3244 char *minor, *ret;
3252 3245
3253 3246 if ((fd = open(path, O_RDONLY)) < 0)
3254 3247 return (NULL);
3255 3248
3256 3249 minor = NULL;
3257 3250 ret = NULL;
3258 3251 if (devid_get(fd, &devid) == 0) {
3259 3252 if (devid_get_minor_name(fd, &minor) == 0)
3260 3253 ret = devid_str_encode(devid, minor);
3261 3254 if (minor != NULL)
3262 3255 devid_str_free(minor);
3263 3256 devid_free(devid);
3264 3257 }
3265 3258 (void) close(fd);
3266 3259
3267 3260 return (ret);
3268 3261 }
3269 3262
3270 3263 /*
3271 3264 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3272 3265 * ignore any failure here, since a common case is for an unprivileged user to
3273 3266 * type 'zpool status', and we'll display the correct information anyway.
3274 3267 */
3275 3268 static void
3276 3269 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3277 3270 {
3278 3271 zfs_cmd_t zc = { 0 };
3279 3272
3280 3273 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3281 3274 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3282 3275 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3283 3276 &zc.zc_guid) == 0);
3284 3277
3285 3278 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3286 3279 }
3287 3280
/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case). If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 *
 * Returns a zfs_strdup()'d string; the caller is responsible for freeing it.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;

	/*
	 * Missing device: there is no path, so display the GUID as the
	 * name instead.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
	    &value) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) != 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				/* Disk moved: reverse-translate and repair. */
				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
					 */
					set_path(zhp, nv, newpath);
					if (nvlist_add_string(nv,
					    ZPOOL_CONFIG_PATH, newpath) == 0)
						verify(nvlist_lookup_string(nv,
						    ZPOOL_CONFIG_PATH,
						    &path) == 0);
					free(newpath);
				}
			}

			if (newdevid)
				devid_str_free(newdevid);
		}

		/* Strip the "/dev/dsk/" prefix for display. */
		if (strncmp(path, "/dev/dsk/", 9) == 0)
			path += 9;

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    &value) == 0 && value) {
			int pathlen = strlen(path);
			char *tmp = zfs_strdup(hdl, path);

			/*
			 * If it starts with c#, and ends with "s0", chop
			 * the "s0" off, or if it ends with "s0/old", remove
			 * the "s0" from the middle.
			 */
			if (CTD_CHECK(tmp)) {
				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
					tmp[pathlen - 2] = '\0';
				} else if (pathlen > 6 &&
				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
					(void) strcpy(&tmp[pathlen - 6],
					    "/old");
				}
			}
			return (tmp);
		}
	} else {
		/* No path at all: fall back to the vdev type name. */
		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);

		/*
		 * If it's a raidz device, we need to stick in the parity level.
		 */
		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
			    &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
			    (u_longlong_t)value);
			path = buf;
		}

		/*
		 * We identify each top-level vdev by using a <type-id>
		 * naming convention.
		 */
		if (verbose) {
			uint64_t id;

			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);
			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
			    (u_longlong_t)id);
			path = buf;
		}
	}

	return (zfs_strdup(hdl, path));
}
3415 3408
3416 3409 static int
3417 3410 zbookmark_mem_compare(const void *a, const void *b)
3418 3411 {
3419 3412 return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3420 3413 }
3421 3414
/*
 * Retrieve the persistent error log, uniquify the members, and return to the
 * caller.
 *
 * On success, *nverrlistp is an allocated nvlist whose entries (key "ejk")
 * each hold a ZPOOL_ERR_DATASET / ZPOOL_ERR_OBJECT pair; the caller frees it.
 * Returns 0 on success, -1 on ioctl failure, no_memory() on allocation
 * failure.
 */
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
	zfs_cmd_t zc = { 0 };
	uint64_t count;
	zbookmark_phys_t *zb = NULL;
	int i;

	/*
	 * Retrieve the raw error list from the kernel. If the number of errors
	 * has increased, allocate more space and continue until we get the
	 * entire list.
	 */
	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
	    &count) == 0);
	if (count == 0)
		return (0);
	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
		return (-1);
	zc.zc_nvlist_dst_size = count;
	(void) strcpy(zc.zc_name, zhp->zpool_name);
	for (;;) {
		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
		    &zc) != 0) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			if (errno == ENOMEM) {
				void *dst;

				/*
				 * More errors appeared since we sized the
				 * buffer; grow it and retry.
				 */
				count = zc.zc_nvlist_dst_size;
				dst = zfs_alloc(zhp->zpool_hdl, count *
				    sizeof (zbookmark_phys_t));
				if (dst == NULL)
					return (-1);
				zc.zc_nvlist_dst = (uintptr_t)dst;
			} else {
				return (-1);
			}
		} else {
			break;
		}
	}

	/*
	 * Sort the resulting bookmarks.  This is a little confusing due to the
	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriate and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);

	/*
	 * NOTE(review): KM_SLEEP is a kernel allocation flag; the userland
	 * nvlist_alloc() kmflag argument is conventionally 0 — confirm intent.
	 */
	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		/* nvlist_add_nvlist() copies nv, so free the local one. */
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}
3521 3514
3522 3515 /*
3523 3516 * Upgrade a ZFS pool to the latest on-disk version.
3524 3517 */
3525 3518 int
3526 3519 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3527 3520 {
3528 3521 zfs_cmd_t zc = { 0 };
3529 3522 libzfs_handle_t *hdl = zhp->zpool_hdl;
3530 3523
3531 3524 (void) strcpy(zc.zc_name, zhp->zpool_name);
3532 3525 zc.zc_cookie = new_version;
3533 3526
3534 3527 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3535 3528 return (zpool_standard_error_fmt(hdl, errno,
3536 3529 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3537 3530 zhp->zpool_name));
3538 3531 return (0);
3539 3532 }
3540 3533
/*
 * Flatten argv[] into 'string' (at most 'len' bytes): the command's
 * basename followed by each remaining argument, space-separated.
 */
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
3550 3543
3551 3544 int
3552 3545 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3553 3546 {
3554 3547 zfs_cmd_t zc = { 0 };
3555 3548 nvlist_t *args;
3556 3549 int err;
3557 3550
3558 3551 args = fnvlist_alloc();
3559 3552 fnvlist_add_string(args, "message", message);
3560 3553 err = zcmd_write_src_nvlist(hdl, &zc, args);
3561 3554 if (err == 0)
3562 3555 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3563 3556 nvlist_free(args);
3564 3557 zcmd_free_nvlists(&zc);
3565 3558 return (err);
3566 3559 }
3567 3560
3568 3561 /*
3569 3562 * Perform ioctl to get some command history of a pool.
3570 3563 *
3571 3564 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3572 3565 * logical offset of the history buffer to start reading from.
3573 3566 *
3574 3567 * Upon return, 'off' is the next logical offset to read from and
3575 3568 * 'len' is the actual amount of bytes read into 'buf'.
3576 3569 */
3577 3570 static int
3578 3571 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3579 3572 {
3580 3573 zfs_cmd_t zc = { 0 };
3581 3574 libzfs_handle_t *hdl = zhp->zpool_hdl;
3582 3575
3583 3576 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3584 3577
3585 3578 zc.zc_history = (uint64_t)(uintptr_t)buf;
3586 3579 zc.zc_history_len = *len;
3587 3580 zc.zc_history_offset = *off;
3588 3581
3589 3582 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3590 3583 switch (errno) {
3591 3584 case EPERM:
3592 3585 return (zfs_error_fmt(hdl, EZFS_PERM,
3593 3586 dgettext(TEXT_DOMAIN,
3594 3587 "cannot show history for pool '%s'"),
3595 3588 zhp->zpool_name));
3596 3589 case ENOENT:
3597 3590 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3598 3591 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3599 3592 "'%s'"), zhp->zpool_name));
3600 3593 case ENOTSUP:
3601 3594 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3602 3595 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3603 3596 "'%s', pool must be upgraded"), zhp->zpool_name));
3604 3597 default:
3605 3598 return (zpool_standard_error_fmt(hdl, errno,
3606 3599 dgettext(TEXT_DOMAIN,
3607 3600 "cannot get history for '%s'"), zhp->zpool_name));
3608 3601 }
3609 3602 }
3610 3603
3611 3604 *len = zc.zc_history_len;
3612 3605 *off = zc.zc_history_offset;
3613 3606
3614 3607 return (0);
3615 3608 }
3616 3609
3617 3610 /*
3618 3611 * Process the buffer of nvlists, unpacking and storing each nvlist record
3619 3612 * into 'records'. 'leftover' is set to the number of bytes that weren't
3620 3613 * processed as there wasn't a complete record.
3621 3614 */
3622 3615 int
3623 3616 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3624 3617 nvlist_t ***records, uint_t *numrecords)
3625 3618 {
3626 3619 uint64_t reclen;
3627 3620 nvlist_t *nv;
3628 3621 int i;
3629 3622
3630 3623 while (bytes_read > sizeof (reclen)) {
3631 3624
3632 3625 /* get length of packed record (stored as little endian) */
3633 3626 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3634 3627 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3635 3628
3636 3629 if (bytes_read < sizeof (reclen) + reclen)
3637 3630 break;
3638 3631
3639 3632 /* unpack record */
3640 3633 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3641 3634 return (ENOMEM);
3642 3635 bytes_read -= sizeof (reclen) + reclen;
3643 3636 buf += sizeof (reclen) + reclen;
3644 3637
3645 3638 /* add record to nvlist array */
3646 3639 (*numrecords)++;
3647 3640 if (ISP2(*numrecords + 1)) {
3648 3641 *records = realloc(*records,
3649 3642 *numrecords * 2 * sizeof (nvlist_t *));
3650 3643 }
3651 3644 (*records)[*numrecords - 1] = nv;
3652 3645 }
3653 3646
3654 3647 *leftover = bytes_read;
3655 3648 return (0);
3656 3649 }
3657 3650
3658 3651 /*
3659 3652 * Retrieve the command history of a pool.
3660 3653 */
3661 3654 int
3662 3655 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3663 3656 {
3664 3657 char *buf;
3665 3658 int buflen = 128 * 1024;
3666 3659 uint64_t off = 0;
3667 3660 nvlist_t **records = NULL;
3668 3661 uint_t numrecords = 0;
3669 3662 int err, i;
3670 3663
3671 3664 buf = malloc(buflen);
3672 3665 if (buf == NULL)
3673 3666 return (ENOMEM);
3674 3667 do {
3675 3668 uint64_t bytes_read = buflen;
3676 3669 uint64_t leftover;
3677 3670
3678 3671 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3679 3672 break;
3680 3673
3681 3674 /* if nothing else was read in, we're at EOF, just return */
3682 3675 if (!bytes_read)
3683 3676 break;
3684 3677
3685 3678 if ((err = zpool_history_unpack(buf, bytes_read,
3686 3679 &leftover, &records, &numrecords)) != 0)
3687 3680 break;
3688 3681 off -= leftover;
3689 3682 if (leftover == bytes_read) {
3690 3683 /*
3691 3684 * no progress made, because buffer is not big enough
3692 3685 * to hold this record; resize and retry.
3693 3686 */
3694 3687 buflen *= 2;
3695 3688 free(buf);
3696 3689 buf = malloc(buflen);
3697 3690 if (buf == NULL)
3698 3691 return (ENOMEM);
3699 3692 }
3700 3693
3701 3694 /* CONSTCOND */
3702 3695 } while (1);
3703 3696
3704 3697 free(buf);
3705 3698
3706 3699 if (!err) {
3707 3700 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3708 3701 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3709 3702 records, numrecords) == 0);
3710 3703 }
3711 3704 for (i = 0; i < numrecords; i++)
3712 3705 nvlist_free(records[i]);
3713 3706 free(records);
3714 3707
3715 3708 return (err);
3716 3709 }
3717 3710
3718 3711 void
3719 3712 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3720 3713 char *pathname, size_t len)
3721 3714 {
3722 3715 zfs_cmd_t zc = { 0 };
3723 3716 boolean_t mounted = B_FALSE;
3724 3717 char *mntpnt = NULL;
3725 3718 char dsname[MAXNAMELEN];
3726 3719
3727 3720 if (dsobj == 0) {
3728 3721 /* special case for the MOS */
3729 3722 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3730 3723 return;
3731 3724 }
3732 3725
3733 3726 /* get the dataset's name */
3734 3727 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3735 3728 zc.zc_obj = dsobj;
3736 3729 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3737 3730 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3738 3731 /* just write out a path of two object numbers */
3739 3732 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3740 3733 dsobj, obj);
3741 3734 return;
3742 3735 }
3743 3736 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3744 3737
3745 3738 /* find out if the dataset is mounted */
3746 3739 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3747 3740
3748 3741 /* get the corrupted object's path */
3749 3742 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3750 3743 zc.zc_obj = obj;
3751 3744 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3752 3745 &zc) == 0) {
3753 3746 if (mounted) {
3754 3747 (void) snprintf(pathname, len, "%s%s", mntpnt,
3755 3748 zc.zc_value);
3756 3749 } else {
3757 3750 (void) snprintf(pathname, len, "%s:%s",
3758 3751 dsname, zc.zc_value);
3759 3752 }
3760 3753 } else {
3761 3754 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3762 3755 }
3763 3756 free(mntpnt);
3764 3757 }
3765 3758
3766 3759 /*
3767 3760 * Read the EFI label from the config, if a label does not exist then
3768 3761 * pass back the error to the caller. If the caller has passed a non-NULL
3769 3762 * diskaddr argument then we set it to the starting address of the EFI
3770 3763 * partition.
3771 3764 */
3772 3765 static int
3773 3766 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3774 3767 {
3775 3768 char *path;
3776 3769 int fd;
3777 3770 char diskname[MAXPATHLEN];
3778 3771 int err = -1;
3779 3772
3780 3773 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3781 3774 return (err);
3782 3775
3783 3776 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3784 3777 strrchr(path, '/'));
3785 3778 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3786 3779 struct dk_gpt *vtoc;
3787 3780
3788 3781 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3789 3782 if (sb != NULL)
3790 3783 *sb = vtoc->efi_parts[0].p_start;
3791 3784 efi_free(vtoc);
3792 3785 }
3793 3786 (void) close(fd);
3794 3787 }
3795 3788 return (err);
3796 3789 }
3797 3790
3798 3791 /*
3799 3792 * determine where a partition starts on a disk in the current
3800 3793 * configuration
3801 3794 */
3802 3795 static diskaddr_t
3803 3796 find_start_block(nvlist_t *config)
3804 3797 {
3805 3798 nvlist_t **child;
3806 3799 uint_t c, children;
3807 3800 diskaddr_t sb = MAXOFFSET_T;
3808 3801 uint64_t wholedisk;
3809 3802
3810 3803 if (nvlist_lookup_nvlist_array(config,
3811 3804 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3812 3805 if (nvlist_lookup_uint64(config,
3813 3806 ZPOOL_CONFIG_WHOLE_DISK,
3814 3807 &wholedisk) != 0 || !wholedisk) {
3815 3808 return (MAXOFFSET_T);
3816 3809 }
3817 3810 if (read_efi_label(config, &sb) < 0)
3818 3811 sb = MAXOFFSET_T;
3819 3812 return (sb);
3820 3813 }
3821 3814
3822 3815 for (c = 0; c < children; c++) {
3823 3816 sb = find_start_block(child[c]);
3824 3817 if (sb != MAXOFFSET_T) {
3825 3818 return (sb);
3826 3819 }
3827 3820 }
3828 3821 return (MAXOFFSET_T);
3829 3822 }
3830 3823
/*
 * Label an individual disk.  The name provided is the short name,
 * stripped of any leading /dev path.
 *
 * Writes an EFI (GPT) label with slice 0 covering the usable disk and
 * slice 8 as the reserved area.  Returns 0 on success or a libzfs error.
 */
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
	char path[MAXPATHLEN];
	struct dk_gpt *vtoc;
	int fd;
	size_t resv = EFI_MIN_RESV_SIZE;
	uint64_t slice_size;
	diskaddr_t start_block;
	char errbuf[1024];

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);

	if (zhp) {
		nvlist_t *nvroot;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

		/* Reuse the pool's cached start block when we have one. */
		if (zhp->zpool_start_block == 0)
			start_block = find_start_block(nvroot);
		else
			start_block = zhp->zpool_start_block;
		zhp->zpool_start_block = start_block;
	} else {
		/* new pool */
		start_block = NEW_START_BLOCK;
	}

	/* Open the backup slice of the raw device. */
	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
	    BACKUP_SLICE);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		/*
		 * This shouldn't happen.  We've long since verified that this
		 * is a valid device.
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		/*
		 * NOTE(review): the format string has no conversion for the
		 * trailing 'name' argument — stray vararg; confirm whether a
		 * "%s" was intended here.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"), name);

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	/* Slice 0 spans the disk minus the reserved area and start offset. */
	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	/* Slice 8: the EFI reserved partition at the end of the disk. */
	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message dir-
		 * ecting the user to manually label the disk and give
		 * a specific slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
	return (0);
}
3937 3930
3938 3931 static boolean_t
3939 3932 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3940 3933 {
3941 3934 char *type;
3942 3935 nvlist_t **child;
3943 3936 uint_t children, c;
3944 3937
3945 3938 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3946 3939 if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
3947 3940 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3948 3941 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3949 3942 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3950 3943 "vdev type '%s' is not supported"), type);
3951 3944 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3952 3945 return (B_FALSE);
3953 3946 }
3954 3947 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3955 3948 &child, &children) == 0) {
3956 3949 for (c = 0; c < children; c++) {
3957 3950 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3958 3951 return (B_FALSE);
3959 3952 }
3960 3953 }
3961 3954 return (B_TRUE);
3962 3955 }
3963 3956
3964 3957 /*
3965 3958 * Check if this zvol is allowable for use as a dump device; zero if
3966 3959 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
3967 3960 *
3968 3961 * Allowable storage configurations include mirrors, all raidz variants, and
3969 3962 * pools with log, cache, and spare devices. Pools which are backed by files or
3970 3963 * have missing/hole vdevs are not suitable.
3971 3964 */
3972 3965 int
3973 3966 zvol_check_dump_config(char *arg)
3974 3967 {
3975 3968 zpool_handle_t *zhp = NULL;
3976 3969 nvlist_t *config, *nvroot;
3977 3970 char *p, *volname;
3978 3971 nvlist_t **top;
3979 3972 uint_t toplevels;
3980 3973 libzfs_handle_t *hdl;
3981 3974 char errbuf[1024];
3982 3975 char poolname[ZPOOL_MAXNAMELEN];
3983 3976 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
3984 3977 int ret = 1;
3985 3978
3986 3979 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
3987 3980 return (-1);
3988 3981 }
3989 3982
3990 3983 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
3991 3984 "dump is not supported on device '%s'"), arg);
3992 3985
3993 3986 if ((hdl = libzfs_init()) == NULL)
3994 3987 return (1);
3995 3988 libzfs_print_on_error(hdl, B_TRUE);
3996 3989
3997 3990 volname = arg + pathlen;
3998 3991
3999 3992 /* check the configuration of the pool */
4000 3993 if ((p = strchr(volname, '/')) == NULL) {
4001 3994 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4002 3995 "malformed dataset name"));
4003 3996 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4004 3997 return (1);
4005 3998 } else if (p - volname >= ZFS_MAXNAMELEN) {
4006 3999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4007 4000 "dataset name is too long"));
4008 4001 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4009 4002 return (1);
4010 4003 } else {
4011 4004 (void) strncpy(poolname, volname, p - volname);
4012 4005 poolname[p - volname] = '\0';
4013 4006 }
4014 4007
4015 4008 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4016 4009 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4017 4010 "could not open pool '%s'"), poolname);
4018 4011 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4019 4012 goto out;
4020 4013 }
4021 4014 config = zpool_get_config(zhp, NULL);
4022 4015 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4023 4016 &nvroot) != 0) {
4024 4017 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4025 4018 "could not obtain vdev configuration for '%s'"), poolname);
4026 4019 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4027 4020 goto out;
4028 4021 }
4029 4022
4030 4023 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4031 4024 &top, &toplevels) == 0);
4032 4025
4033 4026 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4034 4027 goto out;
4035 4028 }
4036 4029 ret = 0;
4037 4030
4038 4031 out:
4039 4032 if (zhp)
4040 4033 zpool_close(zhp);
4041 4034 libzfs_fini(hdl);
4042 4035 return (ret);
4043 4036 }
↓ open down ↓ |
1024 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX