/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2015 RackTop Systems.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 *      pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <thread_pool.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
        uint64_t                ce_txg;
        nvlist_t                *ce_config;
        struct config_entry     *ce_next;
} config_entry_t;

typedef struct vdev_entry {
        uint64_t                ve_guid;
        config_entry_t          *ve_configs;
        struct vdev_entry       *ve_next;
} vdev_entry_t;

typedef struct pool_entry {
        uint64_t                pe_guid;
        vdev_entry_t            *pe_vdevs;
        struct pool_entry       *pe_next;
} pool_entry_t;

typedef struct name_entry {
        char                    *ne_name;
        uint64_t                ne_guid;
        struct name_entry       *ne_next;
} name_entry_t;

typedef struct pool_list {
        pool_entry_t            *pools;
        name_entry_t            *names;
} pool_list_t;
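
/*
 * These lists nest to mirror the hierarchy described at the top of this
 * file: a pool_list_t holds one pool_entry_t per pool guid, each of which
 * holds one vdev_entry_t per toplevel vdev guid, each of which holds one
 * config_entry_t per label txg seen.  The separate 'names' list records
 * every vdev guid -> path mapping observed, for later use by fix_paths().
 */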

static char *
get_devid(const char *path)
{
        int fd;
        ddi_devid_t devid;
        char *minor, *ret;

        if ((fd = open(path, O_RDONLY)) < 0)
                return (NULL);

        minor = NULL;
        ret = NULL;
        if (devid_get(fd, &devid) == 0) {
                if (devid_get_minor_name(fd, &minor) == 0)
                        ret = devid_str_encode(devid, minor);
                if (minor != NULL)
                        devid_str_free(minor);
                devid_free(devid);
        }
        (void) close(fd);

        return (ret);
}


/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
        nvlist_t **child;
        uint_t c, children;
        uint64_t guid;
        name_entry_t *ne, *best;
        char *path, *devid;
        int matched;

        if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
            &child, &children) == 0) {
                for (c = 0; c < children; c++)
                        if (fix_paths(child[c], names) != 0)
                                return (-1);
                return (0);
        }

        /*
         * This is a leaf (file or disk) vdev.  In either case, go through
         * the name list and see if we find a matching guid.  If so, replace
         * the path and see if we can calculate a new devid.
         *
         * There may be multiple names associated with a particular guid, in
         * which case we have overlapping slices or multiple paths to the same
         * disk.  If this is the case, then we want to pick the path that is
         * the most similar to the original, where "most similar" is the number
         * of matching characters starting from the end of the path.  This will
         * preserve slice numbers even if the disks have been reorganized, and
         * will also catch preferred disk names if multiple paths exist.
         */
        verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
        if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
                path = NULL;

        matched = 0;
        best = NULL;
        for (ne = names; ne != NULL; ne = ne->ne_next) {
                if (ne->ne_guid == guid) {
                        const char *src, *dst;
                        int count;

                        if (path == NULL) {
                                best = ne;
                                break;
                        }

                        src = ne->ne_name + strlen(ne->ne_name) - 1;
                        dst = path + strlen(path) - 1;
                        for (count = 0; src >= ne->ne_name && dst >= path;
                            src--, dst--, count++)
                                if (*src != *dst)
                                        break;

                        /*
                         * At this point, 'count' is the number of characters
                         * matched from the end.
                         */
                        if (count > matched || best == NULL) {
                                best = ne;
                                matched = count;
                        }
                }
        }

        if (best == NULL)
                return (0);

        if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
                return (-1);

        if ((devid = get_devid(best->ne_name)) == NULL) {
                (void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
        } else {
                if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0) {
                        devid_str_free(devid);
                        return (-1);
                }
                devid_str_free(devid);
        }

        return (0);
}
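
/*
 * Worked example of the "most similar" heuristic above (illustrative paths
 * only): if the label stored /dev/dsk/c0t1d0s0 and the name list maps the
 * same guid to both /dev/dsk/c2t3d0s0 and /dev/dsk/c2t3d0s2, the first
 * candidate matches four trailing characters ("d0s0") while the second
 * matches none, so the s0 path wins and the slice number is preserved.
 */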

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
        uint64_t pool_guid, vdev_guid, top_guid, txg, state;
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        name_entry_t *ne;

        /*
         * If this is a hot spare not currently in use or a level 2 cache
         * device, add it to the list of names to translate, but don't do
         * anything else.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
            &state) == 0 &&
            (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
                if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                        return (-1);

                if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                        free(ne);
                        return (-1);
                }
                ne->ne_guid = vdev_guid;
                ne->ne_next = pl->names;
                pl->names = ne;
                nvlist_free(config);
                return (0);
        }

        /*
         * If we have a valid config but cannot read any of these fields, then
         * it means we have a half-initialized label.  In vdev_label_init()
         * we write a label with txg == 0 so that we can identify the device
         * in case the user refers to the same disk later on.  If we fail to
         * create the pool, we'll be left with a label in this state
         * which should not be considered part of a valid pool.
         */
        if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
            &pool_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
            &vdev_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
            &top_guid) != 0 ||
            nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
            &txg) != 0 || txg == 0) {
                nvlist_free(config);
                return (0);
        }

        /*
         * First, see if we know about this pool.  If not, then add it to the
         * list of known pools.
         */
        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                if (pe->pe_guid == pool_guid)
                        break;
        }

        if (pe == NULL) {
                if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL)
                        return (-1);
                pe->pe_guid = pool_guid;
                pe->pe_next = pl->pools;
                pl->pools = pe;
        }

        /*
         * Second, see if we know about this toplevel vdev.  Add it if it's
         * missing.
         */
        for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
                if (ve->ve_guid == top_guid)
                        break;
        }

        if (ve == NULL) {
                if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL)
                        return (-1);
                ve->ve_guid = top_guid;
                ve->ve_next = pe->pe_vdevs;
                pe->pe_vdevs = ve;
        }

        /*
         * Third, add the vdev guid -> path mappings so that we can fix up
         * the configuration as necessary before doing the import.
         */
        if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
                return (-1);

        if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
                free(ne);
                return (-1);
        }

        ne->ne_guid = vdev_guid;
        ne->ne_next = pl->names;
        pl->names = ne;

        /*
         * Finally, see if we have a config with a matching transaction
         * group.  If so, then we do nothing.  Otherwise, add it to the list
         * of known configs.
         */
        for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
                if (ce->ce_txg == txg)
                        break;
        }

        if (ce == NULL) {
                if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL)
                        return (-1);
                ce->ce_txg = txg;
                ce->ce_config = config;
                ce->ce_next = ve->ve_configs;
                ve->ve_configs = ce;
        } else {
                nvlist_free(config);
        }

        return (0);
}

/*
 * Determine whether there is an active pool with the given name, and whether
 * its guid matches the given GUID.  Sets *isactive accordingly; returns 0 on
 * success and -1 on error.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
        zpool_handle_t *zhp;
        uint64_t theguid;

        if (zpool_open_silent(hdl, name, &zhp) != 0)
                return (-1);

        if (zhp == NULL) {
                *isactive = B_FALSE;
                return (0);
        }

        verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
            &theguid) == 0);

        zpool_close(zhp);

        *isactive = (theguid == guid);
        return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
        nvlist_t *nvl;
        zfs_cmd_t zc = { 0 };
        int err;

        if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
                return (NULL);

        if (zcmd_alloc_dst_nvlist(hdl, &zc,
            zc.zc_nvlist_conf_size * 2) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
            &zc)) != 0 && errno == ENOMEM) {
                if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
                        zcmd_free_nvlists(&zc);
                        return (NULL);
                }
        }

        if (err) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
                zcmd_free_nvlists(&zc);
                return (NULL);
        }

        zcmd_free_nvlists(&zc);
        return (nvl);
}
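
/*
 * A note on the retry loop above: ZFS_IOC_POOL_TRYIMPORT fails with ENOMEM
 * when the destination buffer is too small for the refreshed config, and
 * zcmd_expand_dst_nvlist() enlarges the buffer before the ioctl is retried,
 * so the loop converges once the buffer is big enough.
 */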

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
        for (int c = 0; c < holes; c++) {
                /* Top-level is a hole */
                if (hole_array[c] == id)
                        return (B_TRUE);
        }
        return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
        pool_entry_t *pe;
        vdev_entry_t *ve;
        config_entry_t *ce;
        nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
        nvlist_t **spares, **l2cache;
        uint_t i, nspares, nl2cache;
        boolean_t config_seen;
        uint64_t best_txg;
        char *name, *hostname;
        uint64_t guid;
        uint_t children = 0;
        nvlist_t **child = NULL;
        uint_t holes;
        uint64_t *hole_array, max_id;
        uint_t c;
        boolean_t isactive;
        uint64_t hostid;
        nvlist_t *nvl;
        boolean_t found_one = B_FALSE;
        boolean_t valid_top_config = B_FALSE;

        if (nvlist_alloc(&ret, 0, 0) != 0)
                goto nomem;

        for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
                uint64_t id, max_txg = 0;

                if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                config_seen = B_FALSE;

                /*
                 * Iterate over all toplevel vdevs.  Grab the pool configuration
                 * from the first one we find, and then go through the rest and
                 * add them as necessary to the 'vdevs' member of the config.
                 */
                for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

                        /*
                         * Determine the best configuration for this vdev by
                         * selecting the config with the latest transaction
                         * group.
                         */
                        best_txg = 0;
                        for (ce = ve->ve_configs; ce != NULL;
                            ce = ce->ce_next) {

                                if (ce->ce_txg > best_txg) {
                                        tmp = ce->ce_config;
                                        best_txg = ce->ce_txg;
                                }
                        }

                        /*
                         * We rely on the fact that the max txg for the
                         * pool will contain the most up-to-date information
                         * about the valid top-levels in the vdev namespace.
                         */
                        if (best_txg > max_txg) {
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_VDEV_CHILDREN,
                                    DATA_TYPE_UINT64);
                                (void) nvlist_remove(config,
                                    ZPOOL_CONFIG_HOLE_ARRAY,
                                    DATA_TYPE_UINT64_ARRAY);

                                max_txg = best_txg;
                                hole_array = NULL;
                                holes = 0;
                                max_id = 0;
                                valid_top_config = B_FALSE;

                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
                                        verify(nvlist_add_uint64(config,
                                            ZPOOL_CONFIG_VDEV_CHILDREN,
                                            max_id) == 0);
                                        valid_top_config = B_TRUE;
                                }

                                if (nvlist_lookup_uint64_array(tmp,
                                    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
                                    &holes) == 0) {
                                        verify(nvlist_add_uint64_array(config,
                                            ZPOOL_CONFIG_HOLE_ARRAY,
                                            hole_array, holes) == 0);
                                }
                        }

                        if (!config_seen) {
                                /*
                                 * Copy the relevant pieces of data to the pool
                                 * configuration:
                                 *
                                 *      version
                                 *      pool guid
                                 *      name
                                 *      comment (if available)
                                 *      pool state
                                 *      hostid (if available)
                                 *      hostname (if available)
                                 */
                                uint64_t state, version;
                                char *comment = NULL;

                                version = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_VERSION);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_VERSION, version);
                                guid = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_GUID);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_GUID, guid);
                                name = fnvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_POOL_NAME);
                                fnvlist_add_string(config,
                                    ZPOOL_CONFIG_POOL_NAME, name);

                                if (nvlist_lookup_string(tmp,
                                    ZPOOL_CONFIG_COMMENT, &comment) == 0)
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_COMMENT, comment);

                                state = fnvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_POOL_STATE);
                                fnvlist_add_uint64(config,
                                    ZPOOL_CONFIG_POOL_STATE, state);

                                hostid = 0;
                                if (nvlist_lookup_uint64(tmp,
                                    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
                                        fnvlist_add_uint64(config,
                                            ZPOOL_CONFIG_HOSTID, hostid);
                                        hostname = fnvlist_lookup_string(tmp,
                                            ZPOOL_CONFIG_HOSTNAME);
                                        fnvlist_add_string(config,
                                            ZPOOL_CONFIG_HOSTNAME, hostname);
                                }

                                config_seen = B_TRUE;
                        }

                        /*
                         * Add this top-level vdev to the child array.
                         */
                        verify(nvlist_lookup_nvlist(tmp,
                            ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
                        verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
                            &id) == 0);

                        if (id >= children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (id + 1) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = id + 1;
                        }
                        if (nvlist_dup(nvtop, &child[id], 0) != 0)
                                goto nomem;
                }

                /*
                 * If we have information about all the top-levels then
                 * clean up the nvlist which we've constructed. This
                 * means removing any extraneous devices that are
                 * beyond the valid range or adding devices to the end
                 * of our array which appear to be missing.
                 */
                if (valid_top_config) {
                        if (max_id < children) {
                                for (c = max_id; c < children; c++)
                                        nvlist_free(child[c]);
                                children = max_id;
                        } else if (max_id > children) {
                                nvlist_t **newchild;

                                newchild = zfs_alloc(hdl, (max_id) *
                                    sizeof (nvlist_t *));
                                if (newchild == NULL)
                                        goto nomem;

                                for (c = 0; c < children; c++)
                                        newchild[c] = child[c];

                                free(child);
                                child = newchild;
                                children = max_id;
                        }
                }

                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                /*
                 * The vdev namespace may contain holes as a result of
                 * device removal. We must add them back into the vdev
                 * tree before we process any missing devices.
                 */
                if (holes > 0) {
                        ASSERT(valid_top_config);

                        for (c = 0; c < children; c++) {
                                nvlist_t *holey;

                                if (child[c] != NULL ||
                                    !vdev_is_hole(hole_array, holes, c))
                                        continue;

                                if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;

                                /*
                                 * Holes in the namespace are treated as
                                 * "hole" top-level vdevs and have a
                                 * special flag set on them.
                                 */
                                if (nvlist_add_string(holey,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_HOLE) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(holey,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(holey);
                                        goto nomem;
                                }
                                child[c] = holey;
                        }
                }

                /*
                 * Look for any missing top-level vdevs.  If this is the case,
                 * create a faked up 'missing' vdev as a placeholder.  We cannot
                 * simply compress the child array, because the kernel performs
                 * certain checks to make sure the vdev IDs match their location
                 * in the configuration.
                 */
                for (c = 0; c < children; c++) {
                        if (child[c] == NULL) {
                                nvlist_t *missing;
                                if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
                                    0) != 0)
                                        goto nomem;
                                if (nvlist_add_string(missing,
                                    ZPOOL_CONFIG_TYPE,
                                    VDEV_TYPE_MISSING) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_ID, c) != 0 ||
                                    nvlist_add_uint64(missing,
                                    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
                                        nvlist_free(missing);
                                        goto nomem;
                                }
                                child[c] = missing;
                        }
                }

                /*
                 * Put all of this pool's top-level vdevs into a root vdev.
                 */
                if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
                        goto nomem;
                if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
                    VDEV_TYPE_ROOT) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
                    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
                    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
                    child, children) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                for (c = 0; c < children; c++)
                        nvlist_free(child[c]);
                free(child);
                children = 0;
                child = NULL;

                /*
                 * Go through and fix up any paths and/or devids based on our
                 * known list of vdev GUID -> path mappings.
                 */
                if (fix_paths(nvroot, pl->names) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }

                /*
                 * Add the root vdev to this pool's configuration.
                 */
                if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    nvroot) != 0) {
                        nvlist_free(nvroot);
                        goto nomem;
                }
                nvlist_free(nvroot);

                /*
                 * zdb uses this path to report on active pools that were
                 * imported or created using -R.
                 */
                if (active_ok)
                        goto add_pool;

                /*
                 * Determine if this pool is currently active, in which case we
                 * can't actually import it.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
                    &guid) == 0);

                if (pool_active(hdl, name, guid, &isactive) != 0)
                        goto error;

                if (isactive) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                if ((nvl = refresh_config(hdl, config)) == NULL) {
                        nvlist_free(config);
                        config = NULL;
                        continue;
                }

                nvlist_free(config);
                config = nvl;

                /*
                 * Go through and update the paths for spares, now that we have
                 * them.
                 */
                verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
                    &nvroot) == 0);
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
                    &spares, &nspares) == 0) {
                        for (i = 0; i < nspares; i++) {
                                if (fix_paths(spares[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Update the paths for l2cache devices.
                 */
                if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
                    &l2cache, &nl2cache) == 0) {
                        for (i = 0; i < nl2cache; i++) {
                                if (fix_paths(l2cache[i], pl->names) != 0)
                                        goto nomem;
                        }
                }

                /*
                 * Restore the original information read from the actual label.
                 */
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
                    DATA_TYPE_UINT64);
                (void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
                    DATA_TYPE_STRING);
                if (hostid != 0) {
                        verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
                            hostid) == 0);
                        verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
                            hostname) == 0);
                }

add_pool:
                /*
                 * Add this pool to the list of configs.
                 */
                verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
                    &name) == 0);
                if (nvlist_add_nvlist(ret, name, config) != 0)
                        goto nomem;

                found_one = B_TRUE;
                nvlist_free(config);
                config = NULL;
        }

        if (!found_one) {
                nvlist_free(ret);
                ret = NULL;
        }

        return (ret);

nomem:
        (void) no_memory(hdl);
error:
        nvlist_free(config);
        nvlist_free(ret);
        for (c = 0; c < children; c++)
                nvlist_free(child[c]);
        free(child);

        return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
        ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
        return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
            0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
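
/*
 * Layout illustration, assuming the standard on-disk constants
 * (VDEV_LABELS == 4 and a 256KB vdev_label_t): labels 0 and 1 sit at
 * offsets 0 and 256KB from the start of the device, and labels 2 and 3
 * at size - 512KB and size - 256KB, i.e. two copies at each end of the
 * device.
 */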

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t state, txg, size;

        *config = NULL;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = malloc(sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pread64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t))
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        continue;
                }

                free(label);
                return (0);
        }

        free(label);
        *config = NULL;
        return (0);
}
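
/*
 * Note that the first label that unpacks and passes the state/txg sanity
 * checks above wins; the four labels are not cross-checked against each
 * other here.  Picking the best txg across devices is left to add_config()
 * and get_configs().
 */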

typedef struct rdsk_node {
        char *rn_name;
        int rn_dfd;
        libzfs_handle_t *rn_hdl;
        nvlist_t *rn_config;
        avl_tree_t *rn_avl;
        avl_node_t rn_node;
        boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
        const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
        const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
        char *nm1slice, *nm2slice;
        int rv;

        /*
         * Slices zero and two are the most likely to provide results,
         * so put those first.
         */
        nm1slice = strstr(nm1, "s0");
        nm2slice = strstr(nm2, "s0");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }
        nm1slice = strstr(nm1, "s2");
        nm2slice = strstr(nm2, "s2");
        if (nm1slice && !nm2slice) {
                return (-1);
        }
        if (!nm1slice && nm2slice) {
                return (1);
        }

        rv = strcmp(nm1, nm2);
        if (rv == 0)
                return (0);
        return (rv > 0 ? 1 : -1);
}
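
/*
 * For example, this comparator orders { c0t0d0s3, c0t0d0s2, c0t0d0s0 } as
 * c0t0d0s0, c0t0d0s2, c0t0d0s3, so the slices most likely to hold a label
 * are probed first by the scan below.
 */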

static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
        rdsk_node_t tmpnode;
        rdsk_node_t *node;
        char sname[MAXNAMELEN];

        tmpnode.rn_name = &sname[0];
        (void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
            diskname, partno);
        /*
         * Protect against division by zero for disk labels that
         * contain a bogus sector size.
         */
        if (blksz == 0)
                blksz = DEV_BSIZE;
        /* too small to contain a zpool? */
        if ((size < (SPA_MINDEVSIZE / blksz)) &&
            (node = avl_find(r, &tmpnode, NULL)))
                node->rn_nozpool = B_TRUE;
}

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strlcpy(diskname, sname, MAXNAMELEN);
        if (((ptr = strrchr(diskname, 's')) == NULL) &&
            ((ptr = strrchr(diskname, 'p')) == NULL))
                return;
        ptr[0] = 's';
        ptr[1] = '\0';
        for (i = 0; i < NDKMAP; i++)
                check_one_slice(r, diskname, i, 0, 1);
        ptr[0] = 'p';
        for (i = 0; i <= FD_NUMPART; i++)
                check_one_slice(r, diskname, i, 0, 1);
}

static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
        struct extvtoc vtoc;
        struct dk_gpt *gpt;
        char diskname[MAXNAMELEN];
        char *ptr;
        int i;

        (void) strlcpy(diskname, sname, MAXNAMELEN);
        if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
                return;
        ptr[1] = '\0';

        if (read_extvtoc(fd, &vtoc) >= 0) {
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            vtoc.v_part[i].p_size, vtoc.v_sectorsz);
        } else if (efi_alloc_and_read(fd, &gpt) >= 0) {
                /*
                 * On x86 we'll still have leftover links that point
                 * to slices s[9-15], so use NDKMAP instead.
                 */
                for (i = 0; i < NDKMAP; i++)
                        check_one_slice(r, diskname, i,
                            gpt->efi_parts[i].p_size, gpt->efi_lbasize);
                /* nodes p[1-4] are never used with EFI labels */
                ptr[0] = 'p';
                for (i = 1; i <= FD_NUMPART; i++)
                        check_one_slice(r, diskname, i, 0, 1);
                efi_free(gpt);
        }
}
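
/*
 * Either label format above yields per-slice sizes; slices too small to
 * hold a pool are pre-marked via check_one_slice() so that
 * zpool_open_func() can skip them without opening every minor node.
 */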

static void
zpool_open_func(void *arg)
{
        rdsk_node_t *rn = arg;
        struct stat64 statbuf;
        nvlist_t *config;
        int fd;

        if (rn->rn_nozpool)
                return;
        if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
                /* symlink to a device that's no longer there */
                if (errno == ENOENT)
                        nozpool_all_slices(rn->rn_avl, rn->rn_name);
                return;
        }
        /*
         * Ignore failed stats.  We only want regular
         * files, character devs and block devs.
         */
        if (fstat64(fd, &statbuf) != 0 ||
            (!S_ISREG(statbuf.st_mode) &&
            !S_ISCHR(statbuf.st_mode) &&
            !S_ISBLK(statbuf.st_mode))) {
                (void) close(fd);
                return;
        }
        /* this file is too small to hold a zpool */
        if (S_ISREG(statbuf.st_mode) &&
            statbuf.st_size < SPA_MINDEVSIZE) {
                (void) close(fd);
                return;
        } else if (!S_ISREG(statbuf.st_mode)) {
                /*
                 * Try to read the disk label first so we don't have to
                 * open a bunch of minor nodes that can't have a zpool.
                 */
                check_slices(rn->rn_avl, fd, rn->rn_name);
        }

        if (zpool_read_label(fd, &config) != 0) {
                (void) close(fd);
                (void) no_memory(rn->rn_hdl);
                return;
        }
        (void) close(fd);

        rn->rn_config = config;
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is currently only used in the appliance stack as part of the ZFS sysevent
 * module.
 */
int
zpool_clear_label(int fd)
{
        struct stat64 statbuf;
        int l;
        vdev_label_t *label;
        uint64_t size;

        if (fstat64(fd, &statbuf) == -1)
                return (0);
        size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

        if ((label = calloc(1, sizeof (vdev_label_t))) == NULL)
                return (-1);

        for (l = 0; l < VDEV_LABELS; l++) {
                if (pwrite64(fd, label, sizeof (vdev_label_t),
                    label_offset(size, l)) != sizeof (vdev_label_t)) {
                        free(label);
                        return (-1);
                }
        }

        free(label);
        return (0);
}
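
/*
 * Note that this zeroes all VDEV_LABELS copies, including the two at the
 * end of the device, so a cleared device will no longer be recognized by
 * zpool_read_label() or picked up by the import scan below.
 */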

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no
 * directories are given (iarg->paths is 0), then the default directory
 * (/dev/dsk) is searched.  poolname or guid (but not both) are provided by
 * the caller when trying to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
        int i, dirs = iarg->paths;
        struct dirent64 *dp;
        char path[MAXPATHLEN];
        char *end, **dir = iarg->path;
        size_t pathleft;
        nvlist_t *ret = NULL;
        static char *default_dir = "/dev/dsk";
        pool_list_t pools = { 0 };
        pool_entry_t *pe, *penext;
        vdev_entry_t *ve, *venext;
        config_entry_t *ce, *cenext;
        name_entry_t *ne, *nenext;
        avl_tree_t slice_cache;
        rdsk_node_t *slice;
        void *cookie;

        if (dirs == 0) {
                dirs = 1;
                dir = &default_dir;
        }

        /*
         * Go through and read the label configuration information from every
         * possible device, organizing the information according to pool GUID
         * and toplevel GUID.
         */
        for (i = 0; i < dirs; i++) {
                tpool_t *t;
                char *rdsk;
                int dfd;
                boolean_t config_failed = B_FALSE;
                DIR *dirp;

                /* use realpath to normalize the path */
                if (realpath(dir[i], path) == NULL) {
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
                        goto error;
                }
                end = &path[strlen(path)];
                *end++ = '/';
                *end = 0;
                pathleft = &path[sizeof (path)] - end;

                /*
                 * Using raw devices instead of block devices when we're
                 * reading the labels skips a bunch of slow operations during
                 * close(2) processing, so we replace /dev/dsk with /dev/rdsk.
                 */
                if (strcmp(path, "/dev/dsk/") == 0)
                        rdsk = "/dev/rdsk/";
                else
                        rdsk = path;

                if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
                    (dirp = fdopendir(dfd)) == NULL) {
                        if (dfd >= 0)
                                (void) close(dfd);
                        zfs_error_aux(hdl, "%s", strerror(errno));
                        (void) zfs_error_fmt(hdl, EZFS_BADPATH,
                            dgettext(TEXT_DOMAIN, "cannot open '%s'"),
                            rdsk);
                        goto error;
                }

                avl_create(&slice_cache, slice_cache_compare,
                    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));
                /*
                 * This is not MT-safe, but we have no MT consumers of libzfs
                 */
                while ((dp = readdir64(dirp)) != NULL) {
                        const char *name = dp->d_name;
                        if (name[0] == '.' &&
                            (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
                                continue;

                        slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
                        slice->rn_name = zfs_strdup(hdl, name);
                        slice->rn_avl = &slice_cache;
                        slice->rn_dfd = dfd;
                        slice->rn_hdl = hdl;
                        slice->rn_nozpool = B_FALSE;
                        avl_add(&slice_cache, slice);
                }
                /*
                 * Create a thread pool to do all of this in parallel;
                 * rn_nozpool is not protected, so this is racy in that
                 * multiple tasks could decide that the same slice can
                 * not hold a zpool, which is benign.  Also choose
                 * double the number of processors; we hold a lot of
                 * locks in the kernel, so going beyond this doesn't
                 * buy us much.
                 */
                t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
                    0, NULL);
                for (slice = avl_first(&slice_cache); slice;
                    (slice = avl_walk(&slice_cache, slice,
                    AVL_AFTER)))
                        (void) tpool_dispatch(t, zpool_open_func, slice);
                tpool_wait(t);
                tpool_destroy(t);

                cookie = NULL;
                while ((slice = avl_destroy_nodes(&slice_cache,
                    &cookie)) != NULL) {
                        if (slice->rn_config != NULL && !config_failed) {
                                nvlist_t *config = slice->rn_config;
                                boolean_t matched = B_TRUE;

                                if (iarg->poolname != NULL) {
                                        char *pname;

                                        matched = nvlist_lookup_string(config,
                                            ZPOOL_CONFIG_POOL_NAME,
                                            &pname) == 0 &&
                                            strcmp(iarg->poolname, pname) == 0;
                                } else if (iarg->guid != 0) {
                                        uint64_t this_guid;

                                        matched = nvlist_lookup_uint64(config,
                                            ZPOOL_CONFIG_POOL_GUID,
                                            &this_guid) == 0 &&
                                            iarg->guid == this_guid;
                                }
                                if (!matched) {
                                        nvlist_free(config);
                                } else {
                                        /*
                                         * use the non-raw path for the config
                                         */
                                        (void) strlcpy(end, slice->rn_name,
                                            pathleft);
                                        if (add_config(hdl, &pools, path,
                                            config) != 0) {
                                                nvlist_free(config);
                                                config_failed = B_TRUE;
                                        }
                                }
                        }
                        free(slice->rn_name);
                        free(slice);
                }
                avl_destroy(&slice_cache);

                (void) closedir(dirp);

                if (config_failed)
                        goto error;
        }

        ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
        for (pe = pools.pools; pe != NULL; pe = penext) {
                penext = pe->pe_next;
                for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
                        venext = ve->ve_next;
                        for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
                                cenext = ce->ce_next;
                                nvlist_free(ce->ce_config);
                                free(ce);
                        }
                        free(ve);
                }
                free(pe);
        }

        for (ne = pools.names; ne != NULL; ne = nenext) {
                nenext = ne->ne_next;
                free(ne->ne_name);
                free(ne);
        }

        return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
        importargs_t iarg = { 0 };

        iarg.paths = argc;
        iarg.path = argv;

        return (zpool_find_import_impl(hdl, &iarg));
}
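
/*
 * Minimal usage sketch (hypothetical caller; assumes an hdl obtained from
 * libzfs_init(), error handling omitted):
 *
 *      nvlist_t *pools = zpool_find_import(hdl, 0, NULL);
 *
 * This searches the default /dev/dsk directory and returns an nvlist of
 * importable pool configs keyed by pool name, or NULL if none were found.
 */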
1308 
1309 /*
1310  * Given a cache file, return the contents as a list of importable pools.
1311  * poolname or guid (but not both) are provided by the caller when trying
1312  * to import a specific pool.
1313  */
1314 nvlist_t *
1315 zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
1316     char *poolname, uint64_t guid)
1317 {
1318         char *buf;
1319         int fd;
1320         struct stat64 statbuf;
1321         nvlist_t *raw, *src, *dst;
1322         nvlist_t *pools;
1323         nvpair_t *elem;
1324         char *name;
1325         uint64_t this_guid;
1326         boolean_t active;
1327 
1328         verify(poolname == NULL || guid == 0);
1329 
1330         if ((fd = open(cachefile, O_RDONLY)) < 0) {
1331                 zfs_error_aux(hdl, "%s", strerror(errno));
1332                 (void) zfs_error(hdl, EZFS_BADCACHE,
1333                     dgettext(TEXT_DOMAIN, "failed to open cache file"));
1334                 return (NULL);
1335         }
1336 
1337         if (fstat64(fd, &statbuf) != 0) {
1338                 zfs_error_aux(hdl, "%s", strerror(errno));
1339                 (void) close(fd);
1340                 (void) zfs_error(hdl, EZFS_BADCACHE,
1341                     dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
1342                 return (NULL);
1343         }
1344 
1345         if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
1346                 (void) close(fd);
1347                 return (NULL);
1348         }
1349 
1350         if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
1351                 (void) close(fd);
1352                 free(buf);
1353                 (void) zfs_error(hdl, EZFS_BADCACHE,
1354                     dgettext(TEXT_DOMAIN,
1355                     "failed to read cache file contents"));
1356                 return (NULL);
1357         }
1358 
1359         (void) close(fd);
1360 
1361         if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
1362                 free(buf);
1363                 (void) zfs_error(hdl, EZFS_BADCACHE,
1364                     dgettext(TEXT_DOMAIN,
1365                     "invalid or corrupt cache file contents"));
1366                 return (NULL);
1367         }
1368 
1369         free(buf);
1370 
1371         /*
1372          * Go through and get the current state of the pools and refresh their
1373          * state.
1374          */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		src = fnvpair_value_nvlist(elem);

		name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
		if (guid != 0 && guid != this_guid)
			continue;

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}
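
/*
 * Usage sketch (not compiled in):  fetch every importable pool recorded
 * in the cache file.  The path below is only the conventional default
 * location; a NULL poolname and a guid of 0 request all pools.
 */
#if 0
static nvlist_t *
example_cached_import(libzfs_handle_t *hdl)
{
	return (zpool_find_import_cached(hdl, "/etc/zfs/zpool.cache",
	    NULL, 0));
}
#endif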

/*
 * zpool_iter() callback:  report whether an already-imported pool matches
 * the name or guid requested in the importargs_t.
 */
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

/*
 * Top-level import search:  optionally note whether a matching pool is
 * already imported, then search either the cache file or the devices
 * themselves.
 */
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
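
/*
 * Usage sketch (not compiled in):  search for a specific pool by name.
 * The pool name is hypothetical; with unique set, import.exists reports
 * whether a pool with that name is already imported.
 */
#if 0
static nvlist_t *
example_search(libzfs_handle_t *hdl)
{
	importargs_t import = { 0 };
	nvlist_t *pools;

	import.poolname = "tank";	/* hypothetical pool name */
	import.unique = B_TRUE;
	pools = zpool_search_import(hdl, &import);
	if (import.exists)
		(void) printf("a pool named 'tank' is already active\n");
	return (pools);
}
#endif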

/*
 * Recursively search the vdev tree rooted at 'nv' for a vdev with the
 * given guid.
 */
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
	uint64_t tmp;
	nvlist_t **child;
	uint_t c, children;

	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
	if (tmp == guid)
		return (B_TRUE);

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (find_guid(child[c], guid))
				return (B_TRUE);
	}

	return (B_FALSE);
}
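
/*
 * Usage sketch (not compiled in):  check whether an imported pool's vdev
 * tree contains a particular vdev, mirroring the POOL_STATE_ACTIVE check
 * in zpool_in_use() below.
 */
#if 0
static boolean_t
example_pool_has_vdev(zpool_handle_t *zhp, uint64_t vdev_guid)
{
	nvlist_t *config = zpool_get_config(zhp, NULL);
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);
	return (find_guid(nvroot, vdev_guid));
}
#endif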

typedef struct aux_cbdata {
	const char	*cb_type;
	uint64_t	cb_guid;
	zpool_handle_t	*cb_zhp;
} aux_cbdata_t;

/*
 * zpool_iter() callback:  check the pool's auxiliary vdev list (spares or
 * l2cache, selected by cb_type) for cb_guid.  On a match, the handle is
 * kept open and handed back through cb_zhp.
 */
static int
find_aux(zpool_handle_t *zhp, void *data)
{
	aux_cbdata_t *cbp = data;
	nvlist_t **list;
	uint_t i, count;
	uint64_t guid;
	nvlist_t *nvroot;

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
	    &list, &count) == 0) {
		for (i = 0; i < count; i++) {
			verify(nvlist_lookup_uint64(list[i],
			    ZPOOL_CONFIG_GUID, &guid) == 0);
			if (guid == cbp->cb_guid) {
				cbp->cb_zhp = zhp;
				return (1);
			}
		}
	}

	zpool_close(zhp);
	return (0);
}

/*
 * Determines if the device is in use by a pool.  If so, returns true along
 * with the state and the name of the pool.  The name string is allocated
 * and must be freed by the caller.
 */
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
    boolean_t *inuse)
{
	nvlist_t *config;
	char *name;
	boolean_t ret;
	uint64_t guid, vdev_guid;
	zpool_handle_t *zhp;
	nvlist_t *pool_config;
	uint64_t stateval, isspare;
	aux_cbdata_t cb = { 0 };
	boolean_t isactive;

	*inuse = B_FALSE;

	if (zpool_read_label(fd, &config) != 0) {
		(void) no_memory(hdl);
		return (-1);
	}

	if (config == NULL)
		return (0);

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &stateval) == 0);
	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) == 0);

	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);
	}

	switch (stateval) {
	case POOL_STATE_EXPORTED:
		/*
		 * A pool with an exported state may in fact be imported
		 * read-only, so check the in-core state to see if it's
		 * active and imported read-only.  If it is, set
		 * its state to active.
		 */
		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
		    (zhp = zpool_open_canfail(hdl, name)) != NULL) {
			if (zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
				stateval = POOL_STATE_ACTIVE;

			/*
			 * All we needed the zpool handle for is the
			 * readonly prop check.
			 */
			zpool_close(zhp);
		}

		ret = B_TRUE;
		break;

	case POOL_STATE_ACTIVE:
		/*
		 * For an active pool, we have to determine if it's really part
		 * of a currently active pool (in which case the pool will exist
		 * and the guid will be the same), or whether it's part of an
		 * active pool that was disconnected without being explicitly
		 * exported.
		 */
		if (pool_active(hdl, name, guid, &isactive) != 0) {
			nvlist_free(config);
			return (-1);
		}

		if (isactive) {
			/*
			 * Because the device may have been removed while
			 * offlined, we only report it as active if the vdev is
			 * still present in the config.  Otherwise, pretend
			 * it's not in use.
			 */
			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
			    (pool_config = zpool_get_config(zhp, NULL))
			    != NULL) {
				nvlist_t *nvroot;

				verify(nvlist_lookup_nvlist(pool_config,
				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
				ret = find_guid(nvroot, vdev_guid);
			} else {
				ret = B_FALSE;
			}

			/*
			 * If this is an active spare within another pool, we
			 * treat it like an unused hot spare.  This allows the
			 * user to create a pool with a hot spare that is
			 * currently in use within another pool.  Since we
			 * return B_TRUE, libdiskmgt will continue to prevent
			 * generic consumers from using the device.
			 */
			if (ret && nvlist_lookup_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
				stateval = POOL_STATE_SPARE;

			if (zhp != NULL)
				zpool_close(zhp);
		} else {
			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
			ret = B_TRUE;
		}
		break;

	case POOL_STATE_SPARE:
		/*
		 * For a hot spare, it can be either definitively in use, or
		 * potentially active.  To determine if it's in use, we iterate
		 * over all pools in the system and search for one with a spare
		 * with a matching guid.
		 *
		 * Due to the shared nature of spares, we don't actually report
		 * the potentially active case as in use.  This means the user
		 * can freely create pools on the hot spares of exported pools;
		 * reporting otherwise would complicate the code considerably,
		 * and we would end up having to handle this case anyway.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_SPARES;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	case POOL_STATE_L2CACHE:
		/*
		 * Check if any pool is currently using this l2cache device.
		 */
		cb.cb_zhp = NULL;
		cb.cb_guid = vdev_guid;
		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
		if (zpool_iter(hdl, find_aux, &cb) == 1) {
			name = (char *)zpool_get_name(cb.cb_zhp);
			ret = B_TRUE;
		} else {
			ret = B_FALSE;
		}
		break;

	default:
		ret = B_FALSE;
		break;
	}

	if (ret) {
		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
			if (cb.cb_zhp != NULL)
				zpool_close(cb.cb_zhp);
			nvlist_free(config);
			return (-1);
		}
		*state = (pool_state_t)stateval;
	}

	if (cb.cb_zhp != NULL)
		zpool_close(cb.cb_zhp);

	nvlist_free(config);
	*inuse = ret;
	return (0);
}
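
/*
 * Usage sketch (not compiled in):  ask whether a disk slice is claimed
 * by any pool before reusing it.  The device path is hypothetical.
 */
#if 0
static void
example_in_use(libzfs_handle_t *hdl)
{
	pool_state_t state;
	char *name = NULL;
	boolean_t inuse;
	int fd;

	if ((fd = open("/dev/dsk/c0t0d0s0", O_RDONLY)) < 0)
		return;
	if (zpool_in_use(hdl, fd, &state, &name, &inuse) == 0 && inuse) {
		(void) printf("in use by pool '%s' (state %d)\n",
		    name, (int)state);
		free(name);
	}
	(void) close(fd);
}
#endif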