223
224 /*
225 * If this is a hot spare not currently in use or level 2 cache
226 * device, add it to the list of names to translate, but don't do
227 * anything else.
228 */
229 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
230 &state) == 0 &&
231 (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
232 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
233 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
234 return (-1);
235
236 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
237 free(ne);
238 return (-1);
239 }
240 ne->ne_guid = vdev_guid;
241 ne->ne_next = pl->names;
242 pl->names = ne;
243 return (0);
244 }
245
246 /*
247 * If we have a valid config but cannot read any of these fields, then
248 * it means we have a half-initialized label. In vdev_label_init()
249 * we write a label with txg == 0 so that we can identify the device
250 * in case the user refers to the same disk later on. If we fail to
251 * create the pool, we'll be left with a label in this state
252 * which should not be considered part of a valid pool.
253 */
254 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
255 &pool_guid) != 0 ||
256 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
257 &vdev_guid) != 0 ||
258 nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
259 &top_guid) != 0 ||
260 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
261 &txg) != 0 || txg == 0) {
262 nvlist_free(config);
263 return (0);
264 }
265
266 /*
267 * First, see if we know about this pool. If not, then add it to the
268 * list of known pools.
269 */
270 for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
271 if (pe->pe_guid == pool_guid)
272 break;
273 }
274
275 if (pe == NULL) {
276 if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
277 nvlist_free(config);
278 return (-1);
279 }
280 pe->pe_guid = pool_guid;
281 pe->pe_next = pl->pools;
282 pl->pools = pe;
283 }
284
285 /*
286 * Second, see if we know about this toplevel vdev. Add it if it's
287 * missing.
288 */
289 for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
290 if (ve->ve_guid == top_guid)
291 break;
292 }
293
294 if (ve == NULL) {
295 if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
296 nvlist_free(config);
297 return (-1);
298 }
299 ve->ve_guid = top_guid;
300 ve->ve_next = pe->pe_vdevs;
301 pe->pe_vdevs = ve;
302 }
303
304 /*
305 * Third, see if we have a config with a matching transaction group. If
306 * so, then we do nothing. Otherwise, add it to the list of known
307 * configs.
308 */
309 for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
310 if (ce->ce_txg == txg)
311 break;
312 }
313
314 if (ce == NULL) {
315 if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
316 nvlist_free(config);
317 return (-1);
318 }
319 ce->ce_txg = txg;
320 ce->ce_config = config;
321 ce->ce_next = ve->ve_configs;
322 ve->ve_configs = ce;
323 } else {
324 nvlist_free(config);
325 }
326
327 /*
328 * At this point we've successfully added our config to the list of
329 * known configs. The last thing to do is add the vdev guid -> path
330 * mappings so that we can fix up the configuration as necessary before
331 * doing the import.
332 */
333 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
334 return (-1);
335
336 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
337 free(ne);
338 return (-1);
339 }
340
341 ne->ne_guid = vdev_guid;
342 ne->ne_next = pl->names;
343 pl->names = ne;
344
345 return (0);
346 }
347
348 /*
349 * Returns true if the named pool matches the given GUID.
350 */
351 static int
352 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
353 boolean_t *isactive)
354 {
355 zpool_handle_t *zhp;
356 uint64_t theguid;
357
358 if (zpool_open_silent(hdl, name, &zhp) != 0)
359 return (-1);
360
361 if (zhp == NULL) {
362 *isactive = B_FALSE;
363 return (0);
364 }
1241 ZPOOL_CONFIG_POOL_NAME,
1242 &pname) == 0 &&
1243 strcmp(iarg->poolname, pname) == 0;
1244 } else if (iarg->guid != 0) {
1245 uint64_t this_guid;
1246
1247 matched = nvlist_lookup_uint64(config,
1248 ZPOOL_CONFIG_POOL_GUID,
1249 &this_guid) == 0 &&
1250 iarg->guid == this_guid;
1251 }
1252 if (!matched) {
1253 nvlist_free(config);
1254 } else {
1255 /*
1256 * use the non-raw path for the config
1257 */
1258 (void) strlcpy(end, slice->rn_name,
1259 pathleft);
1260 if (add_config(hdl, &pools, path,
1261 config) != 0)
1262 config_failed = B_TRUE;
1263 }
1264 }
1265 free(slice->rn_name);
1266 free(slice);
1267 }
1268 avl_destroy(&slice_cache);
1269
1270 (void) closedir(dirp);
1271
1272 if (config_failed)
1273 goto error;
1274 }
1275
1276 ret = get_configs(hdl, &pools, iarg->can_be_active);
1277
1278 error:
1279 for (pe = pools.pools; pe != NULL; pe = penext) {
1280 penext = pe->pe_next;
1281 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1282 venext = ve->ve_next;
|
223
224 /*
225 * If this is a hot spare not currently in use or level 2 cache
226 * device, add it to the list of names to translate, but don't do
227 * anything else.
228 */
229 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
230 &state) == 0 &&
231 (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
232 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
233 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
234 return (-1);
235
236 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
237 free(ne);
238 return (-1);
239 }
240 ne->ne_guid = vdev_guid;
241 ne->ne_next = pl->names;
242 pl->names = ne;
243 nvlist_free(config);
244 return (0);
245 }
246
247 /*
248 * If we have a valid config but cannot read any of these fields, then
249 * it means we have a half-initialized label. In vdev_label_init()
250 * we write a label with txg == 0 so that we can identify the device
251 * in case the user refers to the same disk later on. If we fail to
252 * create the pool, we'll be left with a label in this state
253 * which should not be considered part of a valid pool.
254 */
255 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
256 &pool_guid) != 0 ||
257 nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
258 &vdev_guid) != 0 ||
259 nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
260 &top_guid) != 0 ||
261 nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
262 &txg) != 0 || txg == 0) {
263 nvlist_free(config);
264 return (0);
265 }
266
267 /*
268 * First, see if we know about this pool. If not, then add it to the
269 * list of known pools.
270 */
271 for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
272 if (pe->pe_guid == pool_guid)
273 break;
274 }
275
276 if (pe == NULL) {
277 if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL)
278 return (-1);
279 pe->pe_guid = pool_guid;
280 pe->pe_next = pl->pools;
281 pl->pools = pe;
282 }
283
284 /*
285 * Second, see if we know about this toplevel vdev. Add it if it's
286 * missing.
287 */
288 for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
289 if (ve->ve_guid == top_guid)
290 break;
291 }
292
293 if (ve == NULL) {
294 if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL)
295 return (-1);
296 ve->ve_guid = top_guid;
297 ve->ve_next = pe->pe_vdevs;
298 pe->pe_vdevs = ve;
299 }
300
301 /*
302 * Third, add the vdev guid -> path mappings so that we can fix up
303 * the configuration as necessary before doing the import.
304 */
305 if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
306 return (-1);
307
308 if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
309 free(ne);
310 return (-1);
311 }
312
313 ne->ne_guid = vdev_guid;
314 ne->ne_next = pl->names;
315 pl->names = ne;
316
317 /*
318 * Finally, see if we have a config with a matching transaction
319 * group. If so, then we do nothing. Otherwise, add it to the list
320 * of known configs.
321 */
322 for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
323 if (ce->ce_txg == txg)
324 break;
325 }
326
327 if (ce == NULL) {
328 if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL)
329 return (-1);
330 ce->ce_txg = txg;
331 ce->ce_config = config;
332 ce->ce_next = ve->ve_configs;
333 ve->ve_configs = ce;
334 } else {
335 nvlist_free(config);
336 }
337
338 return (0);
339 }
340
341 /*
342 * Returns true if the named pool matches the given GUID.
343 */
344 static int
345 pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
346 boolean_t *isactive)
347 {
348 zpool_handle_t *zhp;
349 uint64_t theguid;
350
351 if (zpool_open_silent(hdl, name, &zhp) != 0)
352 return (-1);
353
354 if (zhp == NULL) {
355 *isactive = B_FALSE;
356 return (0);
357 }
1234 ZPOOL_CONFIG_POOL_NAME,
1235 &pname) == 0 &&
1236 strcmp(iarg->poolname, pname) == 0;
1237 } else if (iarg->guid != 0) {
1238 uint64_t this_guid;
1239
1240 matched = nvlist_lookup_uint64(config,
1241 ZPOOL_CONFIG_POOL_GUID,
1242 &this_guid) == 0 &&
1243 iarg->guid == this_guid;
1244 }
1245 if (!matched) {
1246 nvlist_free(config);
1247 } else {
1248 /*
1249 * use the non-raw path for the config
1250 */
1251 (void) strlcpy(end, slice->rn_name,
1252 pathleft);
1253 if (add_config(hdl, &pools, path,
1254 config) != 0) {
1255 nvlist_free(config);
1256 config_failed = B_TRUE;
1257 }
1258 }
1259 }
1260 free(slice->rn_name);
1261 free(slice);
1262 }
1263 avl_destroy(&slice_cache);
1264
1265 (void) closedir(dirp);
1266
1267 if (config_failed)
1268 goto error;
1269 }
1270
1271 ret = get_configs(hdl, &pools, iarg->can_be_active);
1272
1273 error:
1274 for (pe = pools.pools; pe != NULL; pe = penext) {
1275 penext = pe->pe_next;
1276 for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
1277 venext = ve->ve_next;
|